Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1480 commits)
  bonding: enable netpoll without checking link status
  xfrm: Refcount destination entry on xfrm_lookup
  net: introduce rx_handler results and logic around that
  bonding: get rid of IFF_SLAVE_INACTIVE netdev->priv_flag
  bonding: wrap slave state work
  net: get rid of multiple bond-related netdevice->priv_flags
  bonding: register slave pointer for rx_handler
  be2net: Bump up the version number
  be2net: Copyright notice change. Update to Emulex instead of ServerEngines
  e1000e: fix kconfig for crc32 dependency
  netfilter ebtables: fix xt_AUDIT to work with ebtables
  xen network backend driver
  bonding: Improve syslog message at device creation time
  bonding: Call netif_carrier_off after register_netdevice
  bonding: Incorrect TX queue offset
  net_sched: fix ip_tos2prio
  xfrm: fix __xfrm_route_forward()
  be2net: Fix UDP packet detected status in RX compl
  Phonet: fix aligned-mode pipe socket buffer header reserve
  netxen: support for GbE port settings
  ...

Fix up conflicts in drivers/staging/brcm80211/brcmsmac/wl_mac80211.c
with the staging updates.
Linus Torvalds 2011-03-16 16:29:25 -07:00
commit 7a6362800c
1143 changed files with 119354 additions and 38141 deletions


@@ -35,6 +35,17 @@ Who: Luis R. Rodriguez <lrodriguez@atheros.com>
---------------------------
What: AR9170USB
When: 2.6.40
Why: This driver is deprecated and the firmware is no longer
maintained. The replacement driver "carl9170" has been
around for a while, so the devices are still supported.
Who: Christian Lamparter <chunkeey@googlemail.com>
---------------------------
What: IRQF_SAMPLE_RANDOM
Check: IRQF_SAMPLE_RANDOM
When: July 2009
@@ -604,6 +615,13 @@ Who: Jean Delvare <khali@linux-fr.org>
----------------------------
What: xt_connlimit rev 0
When: 2012
Who: Jan Engelhardt <jengelh@medozas.de>
Files: net/netfilter/xt_connlimit.c
----------------------------
What: noswapaccount kernel command line parameter
When: 2.6.40
Why: The original implementation of memsw feature enabled by
@@ -619,3 +637,11 @@ Why: The original implementation of memsw feature enabled by
Who: Michal Hocko <mhocko@suse.cz>
----------------------------
What: ipt_addrtype match include file
When: 2012
Why: superseded by xt_addrtype
Who: Florian Westphal <fw@strlen.de>
Files: include/linux/netfilter_ipv4/ipt_addrtype.h
----------------------------


@@ -1,4 +1,4 @@
[state: 21-11-2010]
[state: 27-01-2011]
BATMAN-ADV
----------
@@ -67,15 +67,16 @@ All mesh wide settings can be found in batman's own interface
folder:
# ls /sys/class/net/bat0/mesh/
# aggregated_ogms bonding fragmentation orig_interval
# vis_mode
# aggregated_ogms gw_bandwidth hop_penalty
# bonding gw_mode orig_interval
# fragmentation gw_sel_class vis_mode
There is a special folder for debugging information:
# ls /sys/kernel/debug/batman_adv/bat0/
# originators socket transtable_global transtable_local
# vis_data
# gateways socket transtable_global vis_data
# originators softif_neigh transtable_local
Some of the files contain all sorts of status information regarding the mesh network.
@@ -230,9 +231,8 @@ CONTACT
Please send us comments, experiences, questions, anything :)
IRC: #batman on irc.freenode.org
Mailing-list: b.a.t.m.a.n@lists.open-mesh.org
(optional subscription at
https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
Mailing-list: b.a.t.m.a.n@open-mesh.org (optional subscription
at https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
You can also contact the Authors:


@@ -2558,18 +2558,15 @@ enslaved.
16. Resources and Links
=======================
The latest version of the bonding driver can be found in the latest
version of the linux kernel, found on http://kernel.org
The latest version of this document can be found in either the latest
kernel source (named Documentation/networking/bonding.txt), or on the
bonding sourceforge site:
The latest version of this document can be found in the latest kernel
source (named Documentation/networking/bonding.txt).
http://www.sourceforge.net/projects/bonding
Discussions regarding the bonding driver take place primarily on the
bonding-devel mailing list, hosted at sourceforge.net. If you have
questions or problems, post them to the list. The list address is:
Discussions regarding the usage of the bonding driver take place on the
bonding-devel mailing list, hosted at sourceforge.net. If you have questions or
problems, post them to the list. The list address is:
bonding-devel@lists.sourceforge.net
@@ -2578,6 +2575,17 @@ be found at:
https://lists.sourceforge.net/lists/listinfo/bonding-devel
Discussions regarding the development of the bonding driver take place
on the main Linux network mailing list, hosted at vger.kernel.org. The list
address is:
netdev@vger.kernel.org
The administrative interface (to subscribe or unsubscribe) can
be found at:
http://vger.kernel.org/vger-lists.html#netdev
Donald Becker's Ethernet Drivers and diag programs may be found at :
- http://web.archive.org/web/*/http://www.scyld.com/network/


@@ -280,6 +280,17 @@ tcp_max_orphans - INTEGER
more aggressively. Let me remind you again: each orphan eats
up to ~64K of unswappable memory.
tcp_max_ssthresh - INTEGER
Limited Slow-Start for TCP with large congestion windows (cwnd), as defined in
RFC 3742. Limited slow-start is a mechanism to limit growth of the cwnd
in the region where cwnd is larger than tcp_max_ssthresh. TCP increases cwnd
by at most tcp_max_ssthresh segments, and by at least tcp_max_ssthresh/2
segments per RTT when the cwnd is above tcp_max_ssthresh.
If a TCP connection grew its cwnd to thousands (or tens of thousands) of
segments, and thousands of packets were being dropped during slow start, you
can set tcp_max_ssthresh to improve performance for new TCP connections.
Default: 0 (off)
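As a worked illustration of the growth rule described above, here is a minimal
C sketch of limited slow start in segment units. It is a hedged model of the
RFC 3742 behaviour, not the kernel's implementation; the function name and the
per-RTT granularity are illustrative assumptions.

	/* Limited slow start (RFC 3742); cwnd and max_ssthresh are in
	 * segments, and one call models one RTT of slow start.
	 * max_ssthresh == 0 disables the limit, matching the sysctl
	 * default above. */
	static unsigned int slow_start_rtt(unsigned int cwnd,
					   unsigned int max_ssthresh)
	{
		if (max_ssthresh == 0 || cwnd <= max_ssthresh)
			return cwnd * 2;		/* classic doubling */
		return cwnd + max_ssthresh / 2;		/* capped linear growth */
	}

With tcp_max_ssthresh set to 100, a cwnd of 1000 grows by 50 segments in the
next RTT instead of doubling to 2000.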
tcp_max_syn_backlog - INTEGER
Maximal number of remembered connection requests that have still
not received an acknowledgment from the connecting client.


@@ -154,9 +154,28 @@ connections, one per accept()'d socket.
write(cfd, msg, msglen);
}
Connections are established between two endpoints by a "third party"
application. This means that both endpoints are passive; so connect()
is not possible.
Connections are traditionally established between two endpoints by a
"third party" application. This means that both endpoints are passive.
As of Linux kernel version 2.6.39, it is also possible to connect
two endpoints directly, using connect() on the active side. This is
intended to support the newer Nokia Wireless Modem API, as found in
e.g. the Nokia Slim Modem in the ST-Ericsson U8500 platform:
struct sockaddr_spn spn;
int fd;
fd = socket(PF_PHONET, SOCK_SEQPACKET, PN_PROTO_PIPE);
memset(&spn, 0, sizeof(spn));
spn.spn_family = AF_PHONET;
spn.spn_obj = ...;
spn.spn_dev = ...;
spn.spn_resource = 0xD9;
connect(fd, (struct sockaddr *)&spn, sizeof(spn));
/* normal I/O here ... */
close(fd);
WARNING:
When polling a connected pipe socket for writability, there is an
@@ -181,45 +200,9 @@ The pipe protocol provides two socket options at the SOL_PNPIPE level:
interface index of the network interface created by PNPIPE_ENCAP,
or zero if encapsulation is off.
Phonet Pipe-controller Implementation
-------------------------------------
The Phonet pipe controller is enabled by selecting the CONFIG_PHONET_PIPECTRLR
Kconfig option. It is useful when communicating with Nokia modems that do not
implement the pipe controller themselves, e.g. the Nokia Slim Modem used in the
ST-Ericsson U8500 platform.
The implementation is based on the Data Connection Establishment Sequence
depicted in 'Nokia Wireless Modem API - Wireless_modem_user_guide.pdf'
document.
It allows a phonet sequenced socket (host-pep) to initiate a Pipe connection
between itself and a remote pipe-end point (e.g. modem).
The implementation adds socket options at SOL_PNPIPE level:
PNPIPE_PIPE_HANDLE
It accepts an integer argument for setting the value of the pipe handle.
PNPIPE_ENABLE accepts one integer value (int). If set to zero, the pipe
is disabled. If the value is non-zero, the pipe is enabled. If the pipe
is not (yet) connected, ENOTCONN is returned.
The implementation also adds a socket connect() operation. On calling connect(),
a pipe is created between the source socket and the destination, and the pipe
state is set to PIPE_DISABLED.
After a pipe has been created and enabled successfully, the Pipe data can be
exchanged between the host-pep and remote-pep (modem).
User space would typically follow the sequence below with the pipe controller:
-socket
-bind
-setsockopt for PNPIPE_PIPE_HANDLE
-connect
-setsockopt for PNPIPE_ENCAP_IP
-setsockopt for PNPIPE_ENABLE
PNPIPE_HANDLE is a read-only integer value. It contains the underlying
identifier ("pipe handle") of the pipe. This is only defined for
socket descriptors that are already connected or being connected.
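As a short, hedged sketch of reading that value back: on a connected pipe
socket, PNPIPE_HANDLE can be queried with getsockopt() at the SOL_PNPIPE level
(constants assumed to come from <linux/phonet.h>; error handling abbreviated):

	int handle;
	socklen_t len = sizeof(handle);

	/* fd is the connected pipe socket from the example above */
	if (getsockopt(fd, SOL_PNPIPE, PNPIPE_HANDLE, &handle, &len) == 0)
		printf("pipe handle: %d\n", handle);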
Authors


@@ -1231,7 +1231,7 @@ ATHEROS AR9170 WIRELESS DRIVER
M: Christian Lamparter <chunkeey@web.de>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/ar9170
S: Maintained
S: Obsolete
F: drivers/net/wireless/ath/ar9170/
CARL9170 LINUX COMMUNITY WIRELESS DRIVER
@@ -1727,6 +1727,7 @@ S: Maintained
F: Documentation/zh_CN/
CISCO VIC ETHERNET NIC DRIVER
M: Christian Benvenuti <benve@cisco.com>
M: Vasanthy Kolluri <vkolluri@cisco.com>
M: Roopa Prabhu <roprabhu@cisco.com>
M: David Wang <dwang2@cisco.com>
@@ -5169,6 +5170,7 @@ RALINK RT2X00 WIRELESS LAN DRIVER
P: rt2x00 project
M: Ivo van Doorn <IvDoorn@gmail.com>
M: Gertjan van Wingerde <gwingerde@gmail.com>
M: Helmut Schaa <helmut.schaa@googlemail.com>
L: linux-wireless@vger.kernel.org
L: users@rt2x00.serialmonkey.com (moderated for non-subscribers)
W: http://rt2x00.serialmonkey.com/
@@ -6092,13 +6094,11 @@ F: sound/soc/codecs/twl4030*
TIPC NETWORK LAYER
M: Jon Maloy <jon.maloy@ericsson.com>
M: Allan Stephens <allan.stephens@windriver.com>
L: tipc-discussion@lists.sourceforge.net
L: netdev@vger.kernel.org (core kernel code)
L: tipc-discussion@lists.sourceforge.net (user apps, general discussion)
W: http://tipc.sourceforge.net/
W: http://tipc.cslab.ericsson.net/
T: git git://tipc.cslab.ericsson.net/pub/git/tipc.git
S: Maintained
F: include/linux/tipc*.h
F: include/net/tipc/
F: net/tipc/
TILE ARCHITECTURE


@@ -1,11 +1,21 @@
#ifndef __ASM_SH_ETH_H__
#define __ASM_SH_ETH_H__
#include <linux/phy.h>
enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN};
enum {
SH_ETH_REG_GIGABIT,
SH_ETH_REG_FAST_SH4,
SH_ETH_REG_FAST_SH3_SH2
};
struct sh_eth_plat_data {
int phy;
int edmac_endian;
int register_type;
phy_interface_t phy_interface;
void (*set_mdio_gate)(unsigned long addr);
unsigned char mac_addr[6];
unsigned no_ether_link:1;


@@ -78,7 +78,7 @@ obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_RNG2) += krng.o

crypto/authencesn.c (new file, 835 lines)

@@ -0,0 +1,835 @@
/*
* authencesn.c - AEAD wrapper for IPsec with extended sequence numbers,
* derived from authenc.c
*
* Copyright (C) 2010 secunet Security Networks AG
* Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
struct authenc_esn_instance_ctx {
struct crypto_ahash_spawn auth;
struct crypto_skcipher_spawn enc;
};
struct crypto_authenc_esn_ctx {
unsigned int reqoff;
struct crypto_ahash *auth;
struct crypto_ablkcipher *enc;
};
struct authenc_esn_request_ctx {
unsigned int cryptlen;
unsigned int headlen;
unsigned int trailen;
struct scatterlist *sg;
struct scatterlist hsg[2];
struct scatterlist tsg[1];
struct scatterlist cipher[2];
crypto_completion_t complete;
crypto_completion_t update_complete;
crypto_completion_t update_complete2;
char tail[];
};
static void authenc_esn_request_complete(struct aead_request *req, int err)
{
if (err != -EINPROGRESS)
aead_request_complete(req, err);
}
static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
unsigned int keylen)
{
unsigned int authkeylen;
unsigned int enckeylen;
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
struct crypto_ablkcipher *enc = ctx->enc;
struct rtattr *rta = (void *)key;
struct crypto_authenc_key_param *param;
int err = -EINVAL;
if (!RTA_OK(rta, keylen))
goto badkey;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
goto badkey;
if (RTA_PAYLOAD(rta) < sizeof(*param))
goto badkey;
param = RTA_DATA(rta);
enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (keylen < enckeylen)
goto badkey;
authkeylen = keylen - enckeylen;
crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ahash_setkey(auth, key, authkeylen);
crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) &
CRYPTO_TFM_RES_MASK);
if (err)
goto out;
crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &
CRYPTO_TFM_RES_MASK);
out:
return err;
badkey:
crypto_aead_set_flags(authenc_esn, CRYPTO_TFM_RES_BAD_KEY_LEN);
goto out;
}
static void authenc_esn_geniv_ahash_update_done(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
areq_ctx->cryptlen);
ahash_request_set_callback(ahreq, aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->update_complete2, req);
err = crypto_ahash_update(ahreq);
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
areq_ctx->trailen);
ahash_request_set_callback(ahreq, aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->complete, req);
err = crypto_ahash_finup(ahreq);
if (err)
goto out;
scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
areq_ctx->cryptlen,
crypto_aead_authsize(authenc_esn), 1);
out:
authenc_esn_request_complete(req, err);
}
static void authenc_esn_geniv_ahash_update_done2(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
areq_ctx->trailen);
ahash_request_set_callback(ahreq, aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->complete, req);
err = crypto_ahash_finup(ahreq);
if (err)
goto out;
scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
areq_ctx->cryptlen,
crypto_aead_authsize(authenc_esn), 1);
out:
authenc_esn_request_complete(req, err);
}
static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
if (err)
goto out;
scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
areq_ctx->cryptlen,
crypto_aead_authsize(authenc_esn), 1);
out:
aead_request_complete(req, err);
}
static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *areq,
int err)
{
u8 *ihash;
unsigned int authsize;
struct ablkcipher_request *abreq;
struct aead_request *req = areq->data;
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
unsigned int cryptlen = req->cryptlen;
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
areq_ctx->cryptlen);
ahash_request_set_callback(ahreq,
aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->update_complete2, req);
err = crypto_ahash_update(ahreq);
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
areq_ctx->trailen);
ahash_request_set_callback(ahreq, aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->complete, req);
err = crypto_ahash_finup(ahreq);
if (err)
goto out;
authsize = crypto_aead_authsize(authenc_esn);
cryptlen -= authsize;
ihash = ahreq->result + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
abreq = aead_request_ctx(req);
ablkcipher_request_set_tfm(abreq, ctx->enc);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst,
cryptlen, req->iv);
err = crypto_ablkcipher_decrypt(abreq);
out:
authenc_esn_request_complete(req, err);
}
static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *areq,
int err)
{
u8 *ihash;
unsigned int authsize;
struct ablkcipher_request *abreq;
struct aead_request *req = areq->data;
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
unsigned int cryptlen = req->cryptlen;
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
areq_ctx->trailen);
ahash_request_set_callback(ahreq, aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->complete, req);
err = crypto_ahash_finup(ahreq);
if (err)
goto out;
authsize = crypto_aead_authsize(authenc_esn);
cryptlen -= authsize;
ihash = ahreq->result + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
abreq = aead_request_ctx(req);
ablkcipher_request_set_tfm(abreq, ctx->enc);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst,
cryptlen, req->iv);
err = crypto_ablkcipher_decrypt(abreq);
out:
authenc_esn_request_complete(req, err);
}
static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
int err)
{
u8 *ihash;
unsigned int authsize;
struct ablkcipher_request *abreq;
struct aead_request *req = areq->data;
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
unsigned int cryptlen = req->cryptlen;
if (err)
goto out;
authsize = crypto_aead_authsize(authenc_esn);
cryptlen -= authsize;
ihash = ahreq->result + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
abreq = aead_request_ctx(req);
ablkcipher_request_set_tfm(abreq, ctx->enc);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst,
cryptlen, req->iv);
err = crypto_ablkcipher_decrypt(abreq);
out:
authenc_esn_request_complete(req, err);
}
static u8 *crypto_authenc_esn_ahash(struct aead_request *req,
unsigned int flags)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
u8 *hash = areq_ctx->tail;
int err;
hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
crypto_ahash_alignmask(auth) + 1);
ahash_request_set_tfm(ahreq, auth);
err = crypto_ahash_init(ahreq);
if (err)
return ERR_PTR(err);
ahash_request_set_crypt(ahreq, areq_ctx->hsg, hash, areq_ctx->headlen);
ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
areq_ctx->update_complete, req);
err = crypto_ahash_update(ahreq);
if (err)
return ERR_PTR(err);
ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, areq_ctx->cryptlen);
ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
areq_ctx->update_complete2, req);
err = crypto_ahash_update(ahreq);
if (err)
return ERR_PTR(err);
ahash_request_set_crypt(ahreq, areq_ctx->tsg, hash,
areq_ctx->trailen);
ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
areq_ctx->complete, req);
err = crypto_ahash_finup(ahreq);
if (err)
return ERR_PTR(err);
return hash;
}
static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
unsigned int flags)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct scatterlist *dst = req->dst;
struct scatterlist *assoc = req->assoc;
struct scatterlist *cipher = areq_ctx->cipher;
struct scatterlist *hsg = areq_ctx->hsg;
struct scatterlist *tsg = areq_ctx->tsg;
struct scatterlist *assoc1;
struct scatterlist *assoc2;
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
unsigned int cryptlen = req->cryptlen;
struct page *dstp;
u8 *vdst;
u8 *hash;
dstp = sg_page(dst);
vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
if (ivsize) {
sg_init_table(cipher, 2);
sg_set_buf(cipher, iv, ivsize);
scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
dst = cipher;
cryptlen += ivsize;
}
if (sg_is_last(assoc))
return -EINVAL;
assoc1 = assoc + 1;
if (sg_is_last(assoc1))
return -EINVAL;
assoc2 = assoc + 2;
if (!sg_is_last(assoc2))
return -EINVAL;
sg_init_table(hsg, 2);
sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
sg_init_table(tsg, 1);
sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
areq_ctx->cryptlen = cryptlen;
areq_ctx->headlen = assoc->length + assoc2->length;
areq_ctx->trailen = assoc1->length;
areq_ctx->sg = dst;
areq_ctx->complete = authenc_esn_geniv_ahash_done;
areq_ctx->update_complete = authenc_esn_geniv_ahash_update_done;
areq_ctx->update_complete2 = authenc_esn_geniv_ahash_update_done2;
hash = crypto_authenc_esn_ahash(req, flags);
if (IS_ERR(hash))
return PTR_ERR(hash);
scatterwalk_map_and_copy(hash, dst, cryptlen,
crypto_aead_authsize(authenc_esn), 1);
return 0;
}
static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
int err)
{
struct aead_request *areq = req->data;
if (!err) {
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(areq);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct ablkcipher_request *abreq = aead_request_ctx(areq);
u8 *iv = (u8 *)(abreq + 1) +
crypto_ablkcipher_reqsize(ctx->enc);
err = crypto_authenc_esn_genicv(areq, iv, 0);
}
authenc_esn_request_complete(areq, err);
}
static int crypto_authenc_esn_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_ablkcipher *enc = ctx->enc;
struct scatterlist *dst = req->dst;
unsigned int cryptlen = req->cryptlen;
struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+ ctx->reqoff);
u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
int err;
ablkcipher_request_set_tfm(abreq, enc);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
crypto_authenc_esn_encrypt_done, req);
ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);
memcpy(iv, req->iv, crypto_aead_ivsize(authenc_esn));
err = crypto_ablkcipher_encrypt(abreq);
if (err)
return err;
return crypto_authenc_esn_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
}
static void crypto_authenc_esn_givencrypt_done(struct crypto_async_request *req,
int err)
{
struct aead_request *areq = req->data;
if (!err) {
struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
err = crypto_authenc_esn_genicv(areq, greq->giv, 0);
}
authenc_esn_request_complete(areq, err);
}
static int crypto_authenc_esn_givencrypt(struct aead_givcrypt_request *req)
{
struct crypto_aead *authenc_esn = aead_givcrypt_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct aead_request *areq = &req->areq;
struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
u8 *iv = req->giv;
int err;
skcipher_givcrypt_set_tfm(greq, ctx->enc);
skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
crypto_authenc_esn_givencrypt_done, areq);
skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
areq->iv);
skcipher_givcrypt_set_giv(greq, iv, req->seq);
err = crypto_skcipher_givencrypt(greq);
if (err)
return err;
return crypto_authenc_esn_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
}
static int crypto_authenc_esn_verify(struct aead_request *req)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
u8 *ohash;
u8 *ihash;
unsigned int authsize;
areq_ctx->complete = authenc_esn_verify_ahash_done;
areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
ohash = crypto_authenc_esn_ahash(req, CRYPTO_TFM_REQ_MAY_SLEEP);
if (IS_ERR(ohash))
return PTR_ERR(ohash);
authsize = crypto_aead_authsize(authenc_esn);
ihash = ohash + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
}
static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
unsigned int cryptlen)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct scatterlist *src = req->src;
struct scatterlist *assoc = req->assoc;
struct scatterlist *cipher = areq_ctx->cipher;
struct scatterlist *hsg = areq_ctx->hsg;
struct scatterlist *tsg = areq_ctx->tsg;
struct scatterlist *assoc1;
struct scatterlist *assoc2;
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
struct page *srcp;
u8 *vsrc;
srcp = sg_page(src);
vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
if (ivsize) {
sg_init_table(cipher, 2);
sg_set_buf(cipher, iv, ivsize);
scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
src = cipher;
cryptlen += ivsize;
}
if (sg_is_last(assoc))
return -EINVAL;
assoc1 = assoc + 1;
if (sg_is_last(assoc1))
return -EINVAL;
assoc2 = assoc + 2;
if (!sg_is_last(assoc2))
return -EINVAL;
sg_init_table(hsg, 2);
sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
sg_init_table(tsg, 1);
sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
areq_ctx->cryptlen = cryptlen;
areq_ctx->headlen = assoc->length + assoc2->length;
areq_ctx->trailen = assoc1->length;
areq_ctx->sg = src;
areq_ctx->complete = authenc_esn_verify_ahash_done;
areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
areq_ctx->update_complete2 = authenc_esn_verify_ahash_update_done2;
return crypto_authenc_esn_verify(req);
}
static int crypto_authenc_esn_decrypt(struct aead_request *req)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct ablkcipher_request *abreq = aead_request_ctx(req);
unsigned int cryptlen = req->cryptlen;
unsigned int authsize = crypto_aead_authsize(authenc_esn);
u8 *iv = req->iv;
int err;
if (cryptlen < authsize)
return -EINVAL;
cryptlen -= authsize;
err = crypto_authenc_esn_iverify(req, iv, cryptlen);
if (err)
return err;
ablkcipher_request_set_tfm(abreq, ctx->enc);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);
return crypto_ablkcipher_decrypt(abreq);
}
static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct authenc_esn_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_ablkcipher *enc;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
if (IS_ERR(auth))
return PTR_ERR(auth);
enc = crypto_spawn_skcipher(&ictx->enc);
err = PTR_ERR(enc);
if (IS_ERR(enc))
goto err_free_ahash;
ctx->auth = auth;
ctx->enc = enc;
ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
crypto_ahash_alignmask(auth),
crypto_ahash_alignmask(auth) + 1) +
crypto_ablkcipher_ivsize(enc);
tfm->crt_aead.reqsize = sizeof(struct authenc_esn_request_ctx) +
ctx->reqoff +
max_t(unsigned int,
crypto_ahash_reqsize(auth) +
sizeof(struct ahash_request),
sizeof(struct skcipher_givcrypt_request) +
crypto_ablkcipher_reqsize(enc));
return 0;
err_free_ahash:
crypto_free_ahash(auth);
return err;
}
static void crypto_authenc_esn_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_ahash(ctx->auth);
crypto_free_ablkcipher(ctx->enc);
}
static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
{
struct crypto_attr_type *algt;
struct crypto_instance *inst;
struct hash_alg_common *auth;
struct crypto_alg *auth_base;
struct crypto_alg *enc;
struct authenc_esn_instance_ctx *ctx;
const char *enc_name;
int err;
algt = crypto_get_attr_type(tb);
err = PTR_ERR(algt);
if (IS_ERR(algt))
return ERR_PTR(err);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_AHASH_MASK);
if (IS_ERR(auth))
return ERR_CAST(auth);
auth_base = &auth->base;
enc_name = crypto_attr_alg_name(tb[2]);
err = PTR_ERR(enc_name);
if (IS_ERR(enc_name))
goto out_put_auth;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
err = -ENOMEM;
if (!inst)
goto out_put_auth;
ctx = crypto_instance_ctx(inst);
err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
if (err)
goto err_free_inst;
crypto_set_skcipher_spawn(&ctx->enc, inst);
err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
crypto_requires_sync(algt->type,
algt->mask));
if (err)
goto err_drop_auth;
enc = crypto_skcipher_spawn_alg(&ctx->enc);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
"authencesn(%s,%s)", auth_base->cra_name, enc->cra_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_drop_enc;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"authencesn(%s,%s)", auth_base->cra_driver_name,
enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_enc;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.cra_priority = enc->cra_priority *
10 + auth_base->cra_priority;
inst->alg.cra_blocksize = enc->cra_blocksize;
inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
inst->alg.cra_type = &crypto_aead_type;
inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
inst->alg.cra_aead.maxauthsize = auth->digestsize;
inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
inst->alg.cra_init = crypto_authenc_esn_init_tfm;
inst->alg.cra_exit = crypto_authenc_esn_exit_tfm;
inst->alg.cra_aead.setkey = crypto_authenc_esn_setkey;
inst->alg.cra_aead.encrypt = crypto_authenc_esn_encrypt;
inst->alg.cra_aead.decrypt = crypto_authenc_esn_decrypt;
inst->alg.cra_aead.givencrypt = crypto_authenc_esn_givencrypt;
out:
crypto_mod_put(auth_base);
return inst;
err_drop_enc:
crypto_drop_skcipher(&ctx->enc);
err_drop_auth:
crypto_drop_ahash(&ctx->auth);
err_free_inst:
kfree(inst);
out_put_auth:
inst = ERR_PTR(err);
goto out;
}
static void crypto_authenc_esn_free(struct crypto_instance *inst)
{
struct authenc_esn_instance_ctx *ctx = crypto_instance_ctx(inst);
crypto_drop_skcipher(&ctx->enc);
crypto_drop_ahash(&ctx->auth);
kfree(inst);
}
static struct crypto_template crypto_authenc_esn_tmpl = {
.name = "authencesn",
.alloc = crypto_authenc_esn_alloc,
.free = crypto_authenc_esn_free,
.module = THIS_MODULE,
};
static int __init crypto_authenc_esn_module_init(void)
{
return crypto_register_template(&crypto_authenc_esn_tmpl);
}
static void __exit crypto_authenc_esn_module_exit(void)
{
crypto_unregister_template(&crypto_authenc_esn_tmpl);
}
module_init(crypto_authenc_esn_module_init);
module_exit(crypto_authenc_esn_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
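For context, a hedged sketch of how kernel code might instantiate and key this
template. The algorithm string and the rtattr key layout follow
crypto_authenc_esn_setkey() above; the function name and the choice of
hmac(sha1)/cbc(aes) are illustrative assumptions, and error handling is
abbreviated.

	#include <crypto/aead.h>
	#include <crypto/authenc.h>
	#include <linux/err.h>
	#include <linux/rtnetlink.h>
	#include <linux/slab.h>

	static int authencesn_demo(const u8 *auth_key, unsigned int auth_keylen,
				   const u8 *enc_key, unsigned int enc_keylen)
	{
		struct crypto_aead *tfm;
		struct crypto_authenc_key_param *param;
		struct rtattr *rta;
		unsigned int keylen;
		u8 *key, *p;
		int err;

		tfm = crypto_alloc_aead("authencesn(hmac(sha1),cbc(aes))", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* Key blob: rtattr-wrapped enckeylen, then the auth key, then
		 * the cipher key, exactly as crypto_authenc_esn_setkey()
		 * parses it. */
		keylen = RTA_SPACE(sizeof(*param)) + auth_keylen + enc_keylen;
		key = kmalloc(keylen, GFP_KERNEL);
		if (!key) {
			crypto_free_aead(tfm);
			return -ENOMEM;
		}
		rta = (struct rtattr *)key;
		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
		rta->rta_len = RTA_LENGTH(sizeof(*param));
		param = RTA_DATA(rta);
		param->enckeylen = cpu_to_be32(enc_keylen);

		p = key + RTA_SPACE(sizeof(*param));
		memcpy(p, auth_key, auth_keylen);
		memcpy(p + auth_keylen, enc_key, enc_keylen);

		err = crypto_aead_setkey(tfm, key, keylen);

		kfree(key);
		crypto_free_aead(tfm);
		return err;
	}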


@@ -2177,7 +2177,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
return;
}
if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
retcode = ERR_PERM;
goto fail;
}


@@ -31,6 +31,30 @@
#define VERSION "1.0"
#define ATH3K_DNLOAD 0x01
#define ATH3K_GETSTATE 0x05
#define ATH3K_SET_NORMAL_MODE 0x07
#define ATH3K_GETVERSION 0x09
#define USB_REG_SWITCH_VID_PID 0x0a
#define ATH3K_MODE_MASK 0x3F
#define ATH3K_NORMAL_MODE 0x0E
#define ATH3K_PATCH_UPDATE 0x80
#define ATH3K_SYSCFG_UPDATE 0x40
#define ATH3K_XTAL_FREQ_26M 0x00
#define ATH3K_XTAL_FREQ_40M 0x01
#define ATH3K_XTAL_FREQ_19P2 0x02
#define ATH3K_NAME_LEN 0xFF
struct ath3k_version {
unsigned int rom_version;
unsigned int build_version;
unsigned int ram_version;
unsigned char ref_clock;
unsigned char reserved[0x07];
};
static struct usb_device_id ath3k_table[] = {
/* Atheros AR3011 */
@@ -42,15 +66,31 @@ static struct usb_device_id ath3k_table[] = {
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03F0, 0x311D) },
/* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x3004) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, ath3k_table);
#define BTUSB_ATH3012 0x80
/* This table is used to load patch and sysconfig files
 * for AR3012 */
static struct usb_device_id ath3k_blist_tbl[] = {
/* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ } /* Terminating entry */
};
#define USB_REQ_DFU_DNLOAD 1
#define BULK_SIZE 4096
#define FW_HDR_SIZE 20
static int ath3k_load_firmware(struct usb_device *udev,
const struct firmware *firmware)
@@ -106,28 +146,265 @@ error:
return err;
}
static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
{
int pipe = 0;
pipe = usb_rcvctrlpipe(udev, 0);
return usb_control_msg(udev, pipe, ATH3K_GETSTATE,
USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
state, 0x01, USB_CTRL_SET_TIMEOUT);
}
static int ath3k_get_version(struct usb_device *udev,
struct ath3k_version *version)
{
int pipe = 0;
pipe = usb_rcvctrlpipe(udev, 0);
return usb_control_msg(udev, pipe, ATH3K_GETVERSION,
USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version,
sizeof(struct ath3k_version),
USB_CTRL_SET_TIMEOUT);
}
static int ath3k_load_fwfile(struct usb_device *udev,
const struct firmware *firmware)
{
u8 *send_buf;
int err, pipe, len, size, count, sent = 0;
int ret;
count = firmware->size;
send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
if (!send_buf) {
BT_ERR("Can't allocate memory chunk for firmware");
return -ENOMEM;
}
size = min_t(uint, count, FW_HDR_SIZE);
memcpy(send_buf, firmware->data, size);
pipe = usb_sndctrlpipe(udev, 0);
ret = usb_control_msg(udev, pipe, ATH3K_DNLOAD,
USB_TYPE_VENDOR, 0, 0, send_buf,
size, USB_CTRL_SET_TIMEOUT);
if (ret < 0) {
BT_ERR("Can't change to loading configuration err");
kfree(send_buf);
return ret;
}
sent += size;
count -= size;
while (count) {
size = min_t(uint, count, BULK_SIZE);
pipe = usb_sndbulkpipe(udev, 0x02);
memcpy(send_buf, firmware->data + sent, size);
err = usb_bulk_msg(udev, pipe, send_buf, size,
&len, 3000);
if (err || (len != size)) {
BT_ERR("Error in firmware loading err = %d, "
"len = %d, size = %d", err, len, size);
kfree(send_buf);
return err;
}
sent += size;
count -= size;
}
kfree(send_buf);
return 0;
}
static int ath3k_switch_pid(struct usb_device *udev)
{
int pipe = 0;
pipe = usb_sndctrlpipe(udev, 0);
return usb_control_msg(udev, pipe, USB_REG_SWITCH_VID_PID,
USB_TYPE_VENDOR, 0, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
}
static int ath3k_set_normal_mode(struct usb_device *udev)
{
unsigned char fw_state;
int pipe = 0, ret;
ret = ath3k_get_state(udev, &fw_state);
if (ret < 0) {
BT_ERR("Can't get state to change to normal mode err");
return ret;
}
if ((fw_state & ATH3K_MODE_MASK) == ATH3K_NORMAL_MODE) {
BT_DBG("firmware was already in normal mode");
return 0;
}
pipe = usb_sndctrlpipe(udev, 0);
return usb_control_msg(udev, pipe, ATH3K_SET_NORMAL_MODE,
USB_TYPE_VENDOR, 0, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
}
static int ath3k_load_patch(struct usb_device *udev)
{
unsigned char fw_state;
char filename[ATH3K_NAME_LEN] = {0};
const struct firmware *firmware;
struct ath3k_version fw_version, pt_version;
int ret;
ret = ath3k_get_state(udev, &fw_state);
if (ret < 0) {
BT_ERR("Can't get state to change to load ram patch err");
return ret;
}
if (fw_state & ATH3K_PATCH_UPDATE) {
BT_DBG("Patch was already downloaded");
return 0;
}
ret = ath3k_get_version(udev, &fw_version);
if (ret < 0) {
BT_ERR("Can't get version to change to load ram patch err");
return ret;
}
snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu",
fw_version.rom_version);
ret = request_firmware(&firmware, filename, &udev->dev);
if (ret < 0) {
BT_ERR("Patch file not found %s", filename);
return ret;
}
pt_version.rom_version = *(int *)(firmware->data + firmware->size - 8);
pt_version.build_version = *(int *)
(firmware->data + firmware->size - 4);
if ((pt_version.rom_version != fw_version.rom_version) ||
(pt_version.build_version <= fw_version.build_version)) {
BT_ERR("Patch file version did not match with firmware");
release_firmware(firmware);
return -EINVAL;
}
ret = ath3k_load_fwfile(udev, firmware);
release_firmware(firmware);
return ret;
}
static int ath3k_load_syscfg(struct usb_device *udev)
{
unsigned char fw_state;
char filename[ATH3K_NAME_LEN] = {0};
const struct firmware *firmware;
struct ath3k_version fw_version;
int clk_value, ret;
ret = ath3k_get_state(udev, &fw_state);
if (ret < 0) {
BT_ERR("Can't get state to change to load configuration err");
return -EBUSY;
}
ret = ath3k_get_version(udev, &fw_version);
if (ret < 0) {
BT_ERR("Can't get version to change to load ram patch err");
return ret;
}
switch (fw_version.ref_clock) {
case ATH3K_XTAL_FREQ_26M:
clk_value = 26;
break;
case ATH3K_XTAL_FREQ_40M:
clk_value = 40;
break;
case ATH3K_XTAL_FREQ_19P2:
clk_value = 19;
break;
default:
clk_value = 0;
break;
}
snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s",
fw_version.rom_version, clk_value, ".dfu");
ret = request_firmware(&firmware, filename, &udev->dev);
if (ret < 0) {
BT_ERR("Configuration file not found %s", filename);
return ret;
}
ret = ath3k_load_fwfile(udev, firmware);
release_firmware(firmware);
return ret;
}
static int ath3k_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
const struct firmware *firmware;
struct usb_device *udev = interface_to_usbdev(intf);
int ret;
BT_DBG("intf %p id %p", intf, id);
if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
return -ENODEV;
/* match device ID in ath3k blacklist table */
if (!id->driver_info) {
const struct usb_device_id *match;
match = usb_match_id(intf, ath3k_blist_tbl);
if (match)
id = match;
}
/* load patch and sysconfig files for AR3012 */
if (id->driver_info & BTUSB_ATH3012) {
ret = ath3k_load_patch(udev);
if (ret < 0) {
BT_ERR("Loading patch file failed");
return ret;
}
ret = ath3k_load_syscfg(udev);
if (ret < 0) {
BT_ERR("Loading sysconfig file failed");
return ret;
}
ret = ath3k_set_normal_mode(udev);
if (ret < 0) {
BT_ERR("Set normal mode failed");
return ret;
}
ath3k_switch_pid(udev);
return 0;
}
if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
BT_ERR("Error loading firmware");
return -EIO;
}
if (ath3k_load_firmware(udev, firmware)) {
release_firmware(firmware);
return -EIO;
}
ret = ath3k_load_firmware(udev, firmware);
release_firmware(firmware);
return 0;
return ret;
}
static void ath3k_disconnect(struct usb_interface *intf)


@@ -105,6 +105,9 @@ static struct usb_device_id blacklist_table[] = {
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
/* Atheros 3012 with sflash firmware */
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_IGNORE },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -714,15 +717,11 @@ static int btusb_send_frame(struct sk_buff *skb)
pipe = usb_sndisocpipe(data->udev,
data->isoc_tx_ep->bEndpointAddress);
urb->dev = data->udev;
urb->pipe = pipe;
urb->context = skb;
urb->complete = btusb_isoc_tx_complete;
urb->interval = data->isoc_tx_ep->bInterval;
usb_fill_int_urb(urb, data->udev, pipe,
skb->data, skb->len, btusb_isoc_tx_complete,
skb, data->isoc_tx_ep->bInterval);
urb->transfer_flags = URB_ISO_ASAP;
urb->transfer_buffer = skb->data;
urb->transfer_buffer_length = skb->len;
__fill_isoc_descriptor(urb, skb->len,
le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));


@@ -398,6 +398,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
hdev->flush = hci_uart_flush;
hdev->send = hci_uart_send_frame;
hdev->destruct = hci_uart_destruct;
hdev->parent = hu->tty->dev;
hdev->owner = THIS_MODULE;


@@ -183,20 +183,15 @@ static int addr4_resolve(struct sockaddr_in *src_in,
{
__be32 src_ip = src_in->sin_addr.s_addr;
__be32 dst_ip = dst_in->sin_addr.s_addr;
struct flowi fl;
struct rtable *rt;
struct neighbour *neigh;
int ret;
memset(&fl, 0, sizeof fl);
fl.nl_u.ip4_u.daddr = dst_ip;
fl.nl_u.ip4_u.saddr = src_ip;
fl.oif = addr->bound_dev_if;
ret = ip_route_output_key(&init_net, &rt, &fl);
if (ret)
rt = ip_route_output(&init_net, dst_ip, src_ip, 0, addr->bound_dev_if);
if (IS_ERR(rt)) {
ret = PTR_ERR(rt);
goto out;
}
src_in->sin_family = AF_INET;
src_in->sin_addr.s_addr = rt->rt_src;
@@ -236,28 +231,28 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
struct sockaddr_in6 *dst_in,
struct rdma_dev_addr *addr)
{
struct flowi fl;
struct flowi6 fl6;
struct neighbour *neigh;
struct dst_entry *dst;
int ret;
memset(&fl, 0, sizeof fl);
ipv6_addr_copy(&fl.fl6_dst, &dst_in->sin6_addr);
ipv6_addr_copy(&fl.fl6_src, &src_in->sin6_addr);
fl.oif = addr->bound_dev_if;
memset(&fl6, 0, sizeof fl6);
ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr);
ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr);
fl6.flowi6_oif = addr->bound_dev_if;
dst = ip6_route_output(&init_net, NULL, &fl);
dst = ip6_route_output(&init_net, NULL, &fl6);
if ((ret = dst->error))
goto put;
if (ipv6_addr_any(&fl.fl6_src)) {
if (ipv6_addr_any(&fl6.saddr)) {
ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev,
&fl.fl6_dst, 0, &fl.fl6_src);
&fl6.daddr, 0, &fl6.saddr);
if (ret)
goto put;
src_in->sin6_family = AF_INET6;
ipv6_addr_copy(&src_in->sin6_addr, &fl.fl6_src);
ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr);
}
if (dst->dev->flags & IFF_LOOPBACK) {


@@ -338,23 +338,11 @@ static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
__be16 peer_port, u8 tos)
{
struct rtable *rt;
struct flowi fl = {
.oif = 0,
.nl_u = {
.ip4_u = {
.daddr = peer_ip,
.saddr = local_ip,
.tos = tos}
},
.proto = IPPROTO_TCP,
.uli_u = {
.ports = {
.sport = local_port,
.dport = peer_port}
}
};
if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
rt = ip_route_output_ports(&init_net, NULL, peer_ip, local_ip,
peer_port, local_port, IPPROTO_TCP,
tos, 0);
if (IS_ERR(rt))
return NULL;
return rt;
}


@@ -315,23 +315,11 @@ static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
__be16 peer_port, u8 tos)
{
struct rtable *rt;
struct flowi fl = {
.oif = 0,
.nl_u = {
.ip4_u = {
.daddr = peer_ip,
.saddr = local_ip,
.tos = tos}
},
.proto = IPPROTO_TCP,
.uli_u = {
.ports = {
.sport = local_port,
.dport = peer_port}
}
};
if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
rt = ip_route_output_ports(&init_net, NULL, peer_ip, local_ip,
peer_port, local_port, IPPROTO_TCP,
tos, 0);
if (IS_ERR(rt))
return NULL;
return rt;
}


@@ -153,7 +153,8 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
nesdev, nesdev->netdev[0]->name);
netdev = nesdev->netdev[0];
nesvnic = netdev_priv(netdev);
is_bonded = (netdev->master == event_netdev);
is_bonded = netif_is_bond_slave(netdev) &&
(netdev->master == event_netdev);
if ((netdev == event_netdev) || is_bonded) {
if (nesvnic->rdma_enabled == 0) {
nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"


@@ -1104,21 +1104,19 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpindex)
{
struct rtable *rt;
struct flowi fl;
struct neighbour *neigh;
int rc = arpindex;
struct net_device *netdev;
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
memset(&fl, 0, sizeof fl);
fl.nl_u.ip4_u.daddr = htonl(dst_ip);
if (ip_route_output_key(&init_net, &rt, &fl)) {
rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0);
if (IS_ERR(rt)) {
printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
__func__, dst_ip);
return rc;
}
if (nesvnic->netdev->master)
if (netif_is_bond_slave(netdev))
netdev = nesvnic->netdev->master;
else
netdev = nesvnic->netdev;


@@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
return;
spin_lock(&receiving_list_lock);


@@ -238,8 +238,8 @@ source "drivers/net/arm/Kconfig"
config AX88796
tristate "ASIX AX88796 NE2000 clone support"
depends on ARM || MIPS || SUPERH
select CRC32
select MII
select PHYLIB
select MDIO_BITBANG
help
AX88796 driver, using platform bus to provide
chip detection and resources
@@ -1498,7 +1498,7 @@ config FORCEDETH
config CS89x0
tristate "CS89x0 support"
depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \
|| ARCH_IXDP2X01 || MACH_MX31ADS)
|| ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440)
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the
@@ -1512,7 +1512,7 @@ config CS89x0
config CS89x0_NONISA_IRQ
def_bool y
depends on CS89x0 != n
depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS
depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440
config TC35815
tristate "TOSHIBA TC35815 Ethernet support"
@@ -1944,7 +1944,8 @@ config 68360_ENET
config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC
default IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC if ARM
select PHYLIB
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
@@ -2007,6 +2008,15 @@ config BCM63XX_ENET
This driver supports the ethernet MACs in the Broadcom 63xx
MIPS chipset family (BCM63XX).
config FTMAC100
tristate "Faraday FTMAC100 10/100 Ethernet support"
depends on ARM
select MII
help
This driver supports the FTMAC100 10/100 Ethernet controller
from Faraday. It is used on Faraday A320, Andes AG101 and some
other ARM/NDS32 SoC's.
source "drivers/net/fs_enet/Kconfig"
source "drivers/net/octeon/Kconfig"
@@ -2098,7 +2108,9 @@ config E1000
config E1000E
tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
select CRC32
depends on PCI && (!SPARC32 || BROKEN)
select CRC32
---help---
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
@@ -2235,15 +2247,6 @@ config R8169
To compile this driver as a module, choose M here: the module
will be called r8169. This is recommended.
config R8169_VLAN
bool "VLAN support"
depends on R8169 && VLAN_8021Q
---help---
Say Y here for the r8169 driver to support the functions required
by the kernel 802.1Q code.
If in doubt, say Y.
config SB1250_MAC
tristate "SB1250 Gigabit Ethernet support"
depends on SIBYTE_SB1xxx_SOC
@@ -2594,14 +2597,9 @@ config CHELSIO_T1_1G
Enables support for Chelsio's gigabit Ethernet PCI cards. If you
are using only 10G cards say 'N' here.
config CHELSIO_T3_DEPENDS
tristate
depends on PCI && INET
default y
config CHELSIO_T3
tristate "Chelsio Communications T3 10Gb Ethernet support"
depends on CHELSIO_T3_DEPENDS
depends on PCI && INET
select FW_LOADER
select MDIO
help
@@ -2619,14 +2617,9 @@ config CHELSIO_T3
To compile this driver as a module, choose M here: the module
will be called cxgb3.
config CHELSIO_T4_DEPENDS
tristate
depends on PCI && INET
default y
config CHELSIO_T4
tristate "Chelsio Communications T4 Ethernet support"
depends on CHELSIO_T4_DEPENDS
depends on PCI
select FW_LOADER
select MDIO
help
@@ -2644,14 +2637,9 @@ config CHELSIO_T4
To compile this driver as a module choose M here; the module
will be called cxgb4.
config CHELSIO_T4VF_DEPENDS
tristate
depends on PCI && INET
default y
config CHELSIO_T4VF
tristate "Chelsio Communications T4 Virtual Function Ethernet support"
depends on CHELSIO_T4VF_DEPENDS
depends on PCI
help
This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
adapters with PCI-E SR-IOV Virtual Functions.
@@ -2966,12 +2954,38 @@ config XEN_NETDEV_FRONTEND
select XEN_XENBUS_FRONTEND
default y
help
The network device frontend driver allows the kernel to
access network devices exported by a virtual
machine containing a physical network device driver. The
frontend driver is intended for unprivileged guest domains;
if you are compiling a kernel for a Xen guest, you almost
certainly want to enable this.
This driver provides support for Xen paravirtual network
devices exported by a Xen network driver domain (often
domain 0).
The corresponding Linux backend driver is enabled by the
CONFIG_XEN_NETDEV_BACKEND option.
If you are compiling a kernel for use as Xen guest, you
should say Y here. To compile this driver as a module, choose
M here: the module will be called xen-netfront.
config XEN_NETDEV_BACKEND
tristate "Xen backend network device"
depends on XEN_BACKEND
help
This driver allows the kernel to act as a Xen network driver
domain which exports paravirtual network devices to other
Xen domains. These devices can be accessed by any operating
system that implements a compatible front end.
The corresponding Linux frontend driver is enabled by the
CONFIG_XEN_NETDEV_FRONTEND configuration option.
The backend driver presents a standard network device
endpoint for each paravirtual network device to the driver
domain network stack. These can then be bridged or routed
etc in order to provide full network connectivity.
If you are compiling a kernel to run in a Xen network driver
domain (often this is domain 0) you should say Y here. To
compile this driver as a module, choose M here: the module
will be called xen-netback.
config ISERIES_VETH
tristate "iSeries Virtual Ethernet driver support"


@@ -147,6 +147,7 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o
obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
obj-$(CONFIG_AX88796) += ax88796.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
obj-$(CONFIG_FTMAC100) += ftmac100.o
obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
@@ -171,6 +172,7 @@ obj-$(CONFIG_SLIP) += slip.o
obj-$(CONFIG_SLHC) += slhc.o
obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
obj-$(CONFIG_XEN_NETDEV_BACKEND) += xen-netback/
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_IFB) += ifb.o


@@ -345,7 +345,7 @@ int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
*/
static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
{
u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK;
u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL;
u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP &
~GIGA_CR_1000T_SPEED_MASK;
@@ -373,7 +373,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
}
if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 ||
atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0)
atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0)
return -1;
return 0;
}
@@ -517,19 +517,18 @@ int atl1c_phy_init(struct atl1c_hw *hw)
"Error Setting up Auto-Negotiation\n");
return ret_val;
}
mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
break;
case MEDIA_TYPE_100M_FULL:
mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX;
mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX;
break;
case MEDIA_TYPE_100M_HALF:
mii_bmcr_data |= BMCR_SPEED_100;
mii_bmcr_data |= BMCR_SPEED100;
break;
case MEDIA_TYPE_10M_FULL:
mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX;
mii_bmcr_data |= BMCR_FULLDPLX;
break;
case MEDIA_TYPE_10M_HALF:
mii_bmcr_data |= BMCR_SPEED_10;
break;
default:
if (netif_msg_link(adapter))
@@ -657,7 +656,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
err = atl1c_phy_setup_adv(hw);
if (err)
return err;
mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
}


@@ -736,55 +736,16 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
#define REG_DEBUG_DATA0 0x1900
#define REG_DEBUG_DATA1 0x1904
/* PHY Control Register */
#define MII_BMCR 0x00
#define BMCR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
#define BMCR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
#define BMCR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define BMCR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define BMCR_ISOLATE 0x0400 /* Isolate PHY from MII */
#define BMCR_POWER_DOWN 0x0800 /* Power down */
#define BMCR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define BMCR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
#define BMCR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define BMCR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
#define BMCR_SPEED_MASK 0x2040
#define BMCR_SPEED_1000 0x0040
#define BMCR_SPEED_100 0x2000
#define BMCR_SPEED_10 0x0000
/* PHY Status Register */
#define MII_BMSR 0x01
#define BMMSR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
#define BMSR_JABBER_DETECT 0x0002 /* Jabber Detected */
#define BMSR_LINK_STATUS 0x0004 /* Link Status 1 = link */
#define BMSR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
#define BMSR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
#define BMSR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
#define BMSR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
#define BMSR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
#define BMSR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
#define BMSR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
#define BMSR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
#define BMSR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
#define BMSR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
#define BMMII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
#define BMMII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
#define MII_PHYSID1 0x02
#define MII_PHYSID2 0x03
#define L1D_MPW_PHYID1 0xD01C /* V7 */
#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */
#define L1D_MPW_PHYID3 0xD01E /* V8 */
/* Autoneg Advertisement Register */
#define MII_ADVERTISE 0x04
#define ADVERTISE_SPEED_MASK 0x01E0
#define ADVERTISE_DEFAULT_CAP 0x0DE0
#define ADVERTISE_DEFAULT_CAP \
(ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)
/* 1000BASE-T Control Register */
#define MII_GIGA_CR 0x09
#define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */
#define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */

View file

@ -1102,10 +1102,10 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data);
max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
DEVICE_CTRL_MAX_PAYLOAD_MASK;
hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
DEVICE_CTRL_MAX_RREQ_SZ_MASK;
hw->dmar_block = min(max_pay_load, hw->dmar_block);
hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) <<
TXQ_NUM_TPD_BURST_SHIFT;
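[editor note: the min() -> min_t() switch above sidesteps the kernel's strict type check when the two operands differ in type. A reduced sketch of what min_t() does, mirroring include/linux/kernel.h:]

/* min_t() casts both sides to the named type before comparing, which
 * is why the hunk forces u32 where max_pay_load meets hw->dmaw_block.
 */
#define min_t(type, x, y) ({			\
	type __min1 = (x);			\
	type __min2 = (y);			\
	__min1 < __min2 ? __min1 : __min2; })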
@ -2718,7 +2718,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
goto err_reset;
}
device_init_wakeup(&pdev->dev, 1);
/* reset the controller to
* put the device in a known good starting state */
err = atl1c_phy_init(&adapter->hw);

View file

@ -95,18 +95,18 @@ static int atl1e_set_settings(struct net_device *netdev,
ecmd->advertising = hw->autoneg_advertised |
ADVERTISED_TP | ADVERTISED_Autoneg;
adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK;
adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL;
adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
if (hw->autoneg_advertised & ADVERTISE_10_HALF)
adv4 |= MII_AR_10T_HD_CAPS;
adv4 |= ADVERTISE_10HALF;
if (hw->autoneg_advertised & ADVERTISE_10_FULL)
adv4 |= MII_AR_10T_FD_CAPS;
adv4 |= ADVERTISE_10FULL;
if (hw->autoneg_advertised & ADVERTISE_100_HALF)
adv4 |= MII_AR_100TX_HD_CAPS;
adv4 |= ADVERTISE_100HALF;
if (hw->autoneg_advertised & ADVERTISE_100_FULL)
adv4 |= MII_AR_100TX_FD_CAPS;
adv4 |= ADVERTISE_100FULL;
if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
adv9 |= MII_AT001_CR_1000T_FD_CAPS;
adv9 |= ADVERTISE_1000FULL;
if (adv4 != hw->mii_autoneg_adv_reg ||
adv9 != hw->mii_1000t_ctrl_reg) {

View file

@ -318,7 +318,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T control Register (Address 9).
*/
mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
mii_autoneg_adv_reg &= ~ADVERTISE_ALL;
mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
/*
@ -327,44 +327,37 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
*/
switch (hw->media_type) {
case MEDIA_TYPE_AUTO_SENSOR:
mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
MII_AR_10T_FD_CAPS |
MII_AR_100TX_HD_CAPS |
MII_AR_100TX_FD_CAPS);
hw->autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_10_FULL |
ADVERTISE_100_HALF |
ADVERTISE_100_FULL;
mii_autoneg_adv_reg |= ADVERTISE_ALL;
hw->autoneg_advertised = ADVERTISE_ALL;
if (hw->nic_type == athr_l1e) {
mii_1000t_ctrl_reg |=
MII_AT001_CR_1000T_FD_CAPS;
mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
hw->autoneg_advertised |= ADVERTISE_1000_FULL;
}
break;
case MEDIA_TYPE_100M_FULL:
mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
mii_autoneg_adv_reg |= ADVERTISE_100FULL;
hw->autoneg_advertised = ADVERTISE_100_FULL;
break;
case MEDIA_TYPE_100M_HALF:
mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
mii_autoneg_adv_reg |= ADVERTISE_100_HALF;
hw->autoneg_advertised = ADVERTISE_100_HALF;
break;
case MEDIA_TYPE_10M_FULL:
mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
mii_autoneg_adv_reg |= ADVERTISE_10_FULL;
hw->autoneg_advertised = ADVERTISE_10_FULL;
break;
default:
mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
mii_autoneg_adv_reg |= ADVERTISE_10_HALF;
hw->autoneg_advertised = ADVERTISE_10_HALF;
break;
}
/* flow control fixed to enable all */
mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
@ -374,7 +367,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
return ret_val;
if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000,
mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
@ -397,7 +390,7 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
int ret_val;
u16 phy_data;
phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
if (ret_val) {
@ -645,15 +638,14 @@ int atl1e_restart_autoneg(struct atl1e_hw *hw)
return err;
if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
err = atl1e_write_phy_reg(hw, MII_AT001_CR,
err = atl1e_write_phy_reg(hw, MII_CTRL1000,
hw->mii_1000t_ctrl_reg);
if (err)
return err;
}
err = atl1e_write_phy_reg(hw, MII_BMCR,
MII_CR_RESET | MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
return err;
}

View file

@ -629,127 +629,24 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
/***************************** MII definition ***************************************/
/* PHY Common Register */
#define MII_BMCR 0x00
#define MII_BMSR 0x01
#define MII_PHYSID1 0x02
#define MII_PHYSID2 0x03
#define MII_ADVERTISE 0x04
#define MII_LPA 0x05
#define MII_EXPANSION 0x06
#define MII_AT001_CR 0x09
#define MII_AT001_SR 0x0A
#define MII_AT001_ESR 0x0F
#define MII_AT001_PSCR 0x10
#define MII_AT001_PSSR 0x11
#define MII_INT_CTRL 0x12
#define MII_INT_STATUS 0x13
#define MII_SMARTSPEED 0x14
#define MII_RERRCOUNTER 0x15
#define MII_SREVISION 0x16
#define MII_RESV1 0x17
#define MII_LBRERROR 0x18
#define MII_PHYADDR 0x19
#define MII_RESV2 0x1a
#define MII_TPISTATUS 0x1b
#define MII_NCONFIG 0x1c
#define MII_DBG_ADDR 0x1D
#define MII_DBG_DATA 0x1E
/* PHY Control Register */
#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
#define MII_CR_SPEED_MASK 0x2040
#define MII_CR_SPEED_1000 0x0040
#define MII_CR_SPEED_100 0x2000
#define MII_CR_SPEED_10 0x0000
/* PHY Status Register */
#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
/* Link partner ability register. */
#define MII_LPA_SLCT 0x001f /* Same as advertise selector */
#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */
#define MII_LPA_PAUSE 0x0400 /* PAUSE */
#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */
#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */
#define MII_LPA_LPACK 0x4000 /* Link partner acked us */
#define MII_LPA_NPAGE 0x8000 /* Next page bit */
/* Autoneg Advertisement Register */
#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
#define MII_AR_PAUSE 0x0400 /* Pause operation desired */
#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
#define MII_AR_SPEED_MASK 0x01E0
#define MII_AR_DEFAULT_CAP_MASK 0x0DE0
#define MII_AR_DEFAULT_CAP_MASK 0
/* 1000BASE-T Control Register */
#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
/* 0=DTE device */
#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
/* 0=Configure PHY as Slave */
#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
/* 0=Automatic Master/Slave config */
#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
#define MII_AT001_CR_1000T_SPEED_MASK 0x0300
#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300
/* 1000BASE-T Status Register */
#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
#define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12
#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13
/* Extended Status Register */
#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
#define MII_AT001_CR_1000T_SPEED_MASK \
(ADVERTISE_1000FULL | ADVERTISE_1000HALF)
#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK MII_AT001_CR_1000T_SPEED_MASK
/* AT001 PHY Specific Control Register */
#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */

View file

@ -547,8 +547,8 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
hw->revision_id = pdev->revision;
pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
@ -932,11 +932,11 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
DEVICE_CTRL_MAX_PAYLOAD_MASK;
hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
DEVICE_CTRL_MAX_RREQ_SZ_MASK;
hw->dmar_block = min(max_pay_load, hw->dmar_block);
hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
if (hw->nic_type != athr_l2e_revB)
AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
@ -2051,9 +2051,9 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
mii_advertise_data = MII_AR_10T_HD_CAPS;
mii_advertise_data = ADVERTISE_10HALF;
if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) ||
if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
(atl1e_write_phy_reg(hw,
MII_ADVERTISE, mii_advertise_data) != 0) ||
(atl1e_phy_commit(hw)) != 0) {

View file

@ -950,6 +950,7 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
adapter->wol = 0;
device_set_wakeup_enable(&adapter->pdev->dev, false);
adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
adapter->ict = 50000; /* 100ms */
adapter->link_speed = SPEED_0; /* hardware init */
@ -2735,15 +2736,15 @@ static int atl1_close(struct net_device *netdev)
}
#ifdef CONFIG_PM
static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
static int atl1_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_hw *hw = &adapter->hw;
u32 ctrl = 0;
u32 wufc = adapter->wol;
u32 val;
int retval;
u16 speed;
u16 duplex;
@ -2751,17 +2752,15 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
if (netif_running(netdev))
atl1_down(adapter);
retval = pci_save_state(pdev);
if (retval)
return retval;
atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
val = ctrl & BMSR_LSTATUS;
if (val)
wufc &= ~ATLX_WUFC_LNKC;
if (!wufc)
goto disable_wol;
if (val && wufc) {
if (val) {
val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
if (val) {
if (netif_msg_ifdown(adapter))
@ -2798,23 +2797,18 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
goto exit;
}
if (!val && wufc) {
} else {
ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
ioread32(hw->hw_addr + REG_WOL_CTRL);
iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
ioread32(hw->hw_addr + REG_MAC_CTRL);
hw->phy_configured = false;
pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
goto exit;
}
disable_wol:
return 0;
disable_wol:
iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
ioread32(hw->hw_addr + REG_WOL_CTRL);
ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
@ -2822,37 +2816,17 @@ disable_wol:
iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
hw->phy_configured = false;
pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
exit:
if (netif_running(netdev))
pci_disable_msi(adapter->pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
static int atl1_resume(struct pci_dev *pdev)
static int atl1_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1_adapter *adapter = netdev_priv(netdev);
u32 err;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
err = pci_enable_device(pdev);
if (err) {
if (netif_msg_ifup(adapter))
dev_printk(KERN_DEBUG, &pdev->dev,
"error enabling pci device\n");
return err;
}
pci_set_master(pdev);
iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
atl1_reset_hw(&adapter->hw);
@ -2864,16 +2838,25 @@ static int atl1_resume(struct pci_dev *pdev)
return 0;
}
static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
#define ATL1_PM_OPS (&atl1_pm_ops)
#else
#define atl1_suspend NULL
#define atl1_resume NULL
static int atl1_suspend(struct device *dev) { return 0; }
#define ATL1_PM_OPS NULL
#endif
static void atl1_shutdown(struct pci_dev *pdev)
{
#ifdef CONFIG_PM
atl1_suspend(pdev, PMSG_SUSPEND);
#endif
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1_adapter *adapter = netdev_priv(netdev);
atl1_suspend(&pdev->dev);
pci_wake_from_d3(pdev, adapter->wol);
pci_set_power_state(pdev, PCI_D3hot);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@ -3117,9 +3100,8 @@ static struct pci_driver atl1_driver = {
.id_table = atl1_pci_tbl,
.probe = atl1_probe,
.remove = __devexit_p(atl1_remove),
.suspend = atl1_suspend,
.resume = atl1_resume,
.shutdown = atl1_shutdown
.shutdown = atl1_shutdown,
.driver.pm = ATL1_PM_OPS,
};
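[editor note: these hunks migrate atl1 from the legacy pci_driver .suspend/.resume callbacks to dev_pm_ops. A generic sketch of the resulting shape; the foo_* names are placeholders, not driver symbols:]

#include <linux/pci.h>
#include <linux/pm.h>

/* Placeholder callbacks, assumed to exist elsewhere */
static int foo_suspend(struct device *dev);
static int foo_resume(struct device *dev);

#ifdef CONFIG_PM
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
#define FOO_PM_OPS (&foo_pm_ops)
#else
#define FOO_PM_OPS NULL
#endif

static struct pci_driver foo_driver = {
	.name		= "foo",
	.driver.pm	= FOO_PM_OPS,	/* replaces .suspend/.resume */
};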
/*
@ -3409,6 +3391,9 @@ static int atl1_set_wol(struct net_device *netdev,
adapter->wol = 0;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= ATLX_WUFC_MAG;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
return 0;
}

View file

@ -93,8 +93,8 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
hw->revision_id = pdev->revision;
pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
adapter->wol = 0;

File diff suppressed because it is too large

View file

@ -1,5 +1,5 @@
/*
* Copyright (C) 2005 - 2010 ServerEngines
* Copyright (C) 2005 - 2011 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -8,11 +8,11 @@
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
* linux-drivers@emulex.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
#ifndef BE_H
@ -33,7 +33,7 @@
#include "be_hw.h"
#define DRV_VER "2.103.175u"
#define DRV_VER "4.0.100u"
#define DRV_NAME "be2net"
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@ -67,7 +67,7 @@ static inline char *nic_name(struct pci_dev *pdev)
}
/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN 64
#define BE_HDR_LEN ((u16) 64)
#define BE_MAX_JUMBO_FRAME_SIZE 9018
#define BE_MIN_MTU 256
@ -211,18 +211,40 @@ struct be_rx_stats {
u32 rx_fps; /* Rx frags per second */
};
struct be_rx_compl_info {
u32 rss_hash;
u16 vid;
u16 pkt_size;
u16 rxq_idx;
u16 mac_id;
u8 vlanf;
u8 num_rcvd;
u8 err;
u8 ipf;
u8 tcpf;
u8 udpf;
u8 ip_csum;
u8 l4_csum;
u8 ipv6;
u8 vtm;
u8 pkt_type;
};
struct be_rx_obj {
struct be_adapter *adapter;
struct be_queue_info q;
struct be_queue_info cq;
struct be_rx_compl_info rxcp;
struct be_rx_page_info page_info_tbl[RX_Q_LEN];
struct be_eq_obj rx_eq;
struct be_rx_stats stats;
u8 rss_id;
bool rx_post_starved; /* Zero rx frags have been posted to BE */
u16 last_frag_index;
u16 rsvd;
u32 cache_line_barrier[15];
u32 cache_line_barrier[16];
};
struct be_drv_stats {
u8 be_on_die_temperature;
};
struct be_vf_cfg {
@ -234,6 +256,7 @@ struct be_vf_cfg {
};
#define BE_INVALID_PMAC_ID 0xffffffff
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@ -269,6 +292,7 @@ struct be_adapter {
u32 big_page_size; /* Compounded page size shared by rx wrbs */
u8 msix_vec_next_idx;
struct be_drv_stats drv_stats;
struct vlan_group *vlan_grp;
u16 vlans_added;
@ -281,6 +305,7 @@ struct be_adapter {
struct be_dma_mem stats_cmd;
/* Work queue used to perform periodic tasks like getting statistics */
struct delayed_work work;
u16 work_counter;
/* Ethtool knobs and info */
bool rx_csum; /* BE card must perform rx-checksumming */
@ -298,7 +323,7 @@ struct be_adapter {
u32 rx_fc; /* Rx flow control */
u32 tx_fc; /* Tx flow control */
bool ue_detected;
bool stats_ioctl_sent;
bool stats_cmd_sent;
int link_speed;
u8 port_type;
u8 transceiver;
@ -307,10 +332,13 @@ struct be_adapter {
u32 flash_status;
struct completion flash_compl;
bool be3_native;
bool sriov_enabled;
struct be_vf_cfg vf_cfg[BE_MAX_VF];
u8 is_virtfn;
u32 sli_family;
u8 hba_port_num;
u16 pvid;
};
#define be_physfn(adapter) (!adapter->is_virtfn)
@ -450,9 +478,8 @@ static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
mac[5] = (u8)(addr & 0xFF);
mac[4] = (u8)((addr >> 8) & 0xFF);
mac[3] = (u8)((addr >> 16) & 0xFF);
mac[2] = 0xC9;
mac[1] = 0x00;
mac[0] = 0x00;
/* Use the OUI from the current MAC address */
memcpy(mac, adapter->netdev->dev_addr, 3);
}
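[editor note: the hunk above stops hardcoding the 00:00:C9 OUI and reuses the adapter's own. An illustrative userspace sketch of the composition; names are hypothetical:]

#include <stdint.h>
#include <string.h>

/* Illustrative only: keep the 3-byte OUI from a base MAC and pack a
 * 24-bit per-VF value into the low three bytes, as the hunk does.
 */
static void vf_mac_compose(uint8_t mac[6], const uint8_t base[6],
			   uint32_t addr)
{
	memcpy(mac, base, 3);		/* OUI from the current MAC */
	mac[3] = (addr >> 16) & 0xFF;
	mac[4] = (addr >> 8) & 0xFF;
	mac[5] = addr & 0xFF;
}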
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,

View file

@ -1,5 +1,5 @@
/*
* Copyright (C) 2005 - 2010 ServerEngines
* Copyright (C) 2005 - 2011 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -8,21 +8,30 @@
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
* linux-drivers@emulex.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
#include "be.h"
#include "be_cmds.h"
/* Must be a power of 2 or else MODULO will BUG_ON */
static int be_get_temp_freq = 32;
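[editor note: the comment above ties be_get_temp_freq to a power-of-two constraint. A hypothetical illustration, not the driver's actual MODULO macro, of why a mask-based modulo needs that property:]

/* Hypothetical: (m & (d - 1)) equals (m % d) only when d is a power
 * of two, so a sanity check guards the fast path.
 */
#define IS_POW2(d)		((d) != 0 && (((d) & ((d) - 1)) == 0))
#define FAST_MODULO(m, d)	({ BUG_ON(!IS_POW2(d)); (m) & ((d) - 1); })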
static void be_mcc_notify(struct be_adapter *adapter)
{
struct be_queue_info *mccq = &adapter->mcc_obj.q;
u32 val = 0;
if (adapter->eeh_err) {
dev_info(&adapter->pdev->dev,
"Error in Card Detected! Cannot issue commands\n");
return;
}
val |= mccq->id & DB_MCCQ_RING_ID_MASK;
val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
@ -75,7 +84,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
be_dws_le_to_cpu(&resp->hw_stats,
sizeof(resp->hw_stats));
netdev_stats_update(adapter);
adapter->stats_ioctl_sent = false;
adapter->stats_cmd_sent = false;
}
} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
(compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
@ -102,6 +111,7 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
{
if (evt->valid) {
adapter->vlan_prio_bmap = evt->available_priority_bmap;
adapter->recommended_prio &= ~VLAN_PRIO_MASK;
adapter->recommended_prio =
evt->reco_default_priority << VLAN_PRIO_SHIFT;
}
@ -117,6 +127,16 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
}
}
/*Grp5 PVID evt*/
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
struct be_async_event_grp5_pvid_state *evt)
{
if (evt->enabled)
adapter->pvid = evt->tag;
else
adapter->pvid = 0;
}
static void be_async_grp5_evt_process(struct be_adapter *adapter,
u32 trailer, struct be_mcc_compl *evt)
{
@ -134,6 +154,10 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
be_async_grp5_qos_speed_process(adapter,
(struct be_async_event_grp5_qos_link_speed *)evt);
break;
case ASYNC_EVENT_PVID_STATE:
be_async_grp5_pvid_state_process(adapter,
(struct be_async_event_grp5_pvid_state *)evt);
break;
default:
dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
break;
@ -216,6 +240,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
int i, num, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
if (adapter->eeh_err)
return -EIO;
for (i = 0; i < mcc_timeout; i++) {
num = be_process_mcc(adapter, &status);
if (num)
@ -245,6 +272,12 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
int msecs = 0;
u32 ready;
if (adapter->eeh_err) {
dev_err(&adapter->pdev->dev,
"Error detected in card.Cannot issue commands\n");
return -EIO;
}
do {
ready = ioread32(db);
if (ready == 0xffffffff) {
@ -598,7 +631,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
u32 if_id, u32 *pmac_id)
u32 if_id, u32 *pmac_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_add *req;
@ -619,6 +652,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
req->hdr.domain = domain;
req->if_id = cpu_to_le32(if_id);
memcpy(req->mac_address, mac_addr, ETH_ALEN);
@ -634,7 +668,7 @@ err:
}
/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_del *req;
@ -655,6 +689,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
req->hdr.domain = dom;
req->if_id = cpu_to_le32(if_id);
req->pmac_id = cpu_to_le32(pmac_id);
@ -691,7 +726,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
if (lancer_chip(adapter)) {
req->hdr.version = 1;
req->hdr.version = 2;
req->page_size = 1; /* 1 for 4K */
AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
coalesce_wm);
@ -827,6 +862,12 @@ int be_cmd_txq_create(struct be_adapter *adapter,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
sizeof(*req));
if (lancer_chip(adapter)) {
req->hdr.version = 1;
AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
adapter->if_handle);
}
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
req->ulp_num = BE_ULP1_NUM;
req->type = BE_ETH_TX_RING_TYPE_STANDARD;
@ -995,7 +1036,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
}
/* Uses mbox */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_destroy *req;
@ -1016,6 +1057,7 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
req->hdr.domain = domain;
req->interface_id = cpu_to_le32(interface_id);
status = be_mbox_notify_wait(adapter);
@ -1036,6 +1078,9 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_sge *sge;
int status = 0;
if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
be_cmd_get_die_temperature(adapter);
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@ -1056,7 +1101,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
sge->len = cpu_to_le32(nonemb_cmd->size);
be_mcc_notify(adapter);
adapter->stats_ioctl_sent = true;
adapter->stats_cmd_sent = true;
err:
spin_unlock_bh(&adapter->mcc_lock);
@ -1103,6 +1148,44 @@ err:
return status;
}
/* Uses synchronous mcc */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_cntl_addnl_attribs *req;
int status;
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
status = -EBUSY;
goto err;
}
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_cntl_addnl_attribs *resp =
embedded_payload(wrb);
adapter->drv_stats.be_on_die_temperature =
resp->on_die_temperature;
}
/* If IOCTL fails once, do not bother issuing it again */
else
be_get_temp_freq = 0;
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
/* Uses Mbox */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
@ -1868,8 +1951,8 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
OPCODE_COMMON_SET_QOS, sizeof(*req));
req->hdr.domain = domain;
req->valid_bits = BE_QOS_BITS_NIC;
req->max_bps_nic = bps;
req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
req->max_bps_nic = cpu_to_le32(bps);
status = be_mcc_notify_wait(adapter);
@ -1877,3 +1960,96 @@ err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_cntl_attribs *req;
struct be_cmd_resp_cntl_attribs *resp;
struct be_sge *sge;
int status;
int payload_len = max(sizeof(*req), sizeof(*resp));
struct mgmt_controller_attrib *attribs;
struct be_dma_mem attribs_cmd;
memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
&attribs_cmd.dma);
if (!attribs_cmd.va) {
dev_err(&adapter->pdev->dev,
"Memory allocation failure\n");
return -ENOMEM;
}
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
if (!wrb) {
status = -EBUSY;
goto err;
}
req = attribs_cmd.va;
sge = nonembedded_sgl(wrb);
be_wrb_hdr_prepare(wrb, payload_len, false, 1,
OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(attribs_cmd.size);
status = be_mbox_notify_wait(adapter);
if (!status) {
attribs = (struct mgmt_controller_attrib *)( attribs_cmd.va +
sizeof(struct be_cmd_resp_hdr));
adapter->hba_port_num = attribs->hba_attribs.phy_port;
}
err:
mutex_unlock(&adapter->mbox_lock);
pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
attribs_cmd.dma);
return status;
}
/* Uses mbox */
int be_cmd_check_native_mode(struct be_adapter *adapter)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_set_func_cap *req;
int status;
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
if (!wrb) {
status = -EBUSY;
goto err;
}
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
CAPABILITY_BE3_NATIVE_ERX_API);
req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
status = be_mbox_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
adapter->be3_native = le32_to_cpu(resp->cap_flags) &
CAPABILITY_BE3_NATIVE_ERX_API;
}
err:
mutex_unlock(&adapter->mbox_lock);
return status;
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (C) 2005 - 2010 ServerEngines
* Copyright (C) 2005 - 2011 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -8,11 +8,11 @@
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
* linux-drivers@emulex.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
/*
@ -88,6 +88,7 @@ struct be_mcc_compl {
#define ASYNC_EVENT_CODE_GRP_5 0x5
#define ASYNC_EVENT_QOS_SPEED 0x1
#define ASYNC_EVENT_COS_PRIORITY 0x2
#define ASYNC_EVENT_PVID_STATE 0x3
struct be_async_event_trailer {
u32 code;
};
@ -134,6 +135,18 @@ struct be_async_event_grp5_cos_priority {
struct be_async_event_trailer trailer;
} __packed;
/* When the event code of an async trailer is GRP5 and event type is
* PVID state, the mcc_compl must be interpreted as follows
*/
struct be_async_event_grp5_pvid_state {
u8 enabled;
u8 rsvd0;
u16 tag;
u32 event_tag;
u32 rsvd1;
struct be_async_event_trailer trailer;
} __packed;
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_compl compl;
@ -156,6 +169,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_SET_QOS 28
#define OPCODE_COMMON_MCC_CREATE_EXT 90
#define OPCODE_COMMON_SEEPROM_READ 30
#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
#define OPCODE_COMMON_NTWK_RX_FILTER 34
#define OPCODE_COMMON_GET_FW_VERSION 35
#define OPCODE_COMMON_SET_FLOW_CONTROL 36
@ -176,6 +190,8 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_BEACON_STATE 70
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
#define OPCODE_ETH_RSS_CONFIG 1
#define OPCODE_ETH_ACPI_CONFIG 2
@ -415,7 +431,7 @@ struct be_cmd_resp_mcc_create {
/* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field */
struct amap_tx_context {
u8 rsvd0[16]; /* dword 0 */
u8 if_id[16]; /* dword 0 */
u8 tx_ring_size[4]; /* dword 0 */
u8 rsvd1[26]; /* dword 0 */
u8 pci_func_id[8]; /* dword 1 */
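[editor note: the "pseudo amap" comment above describes a convention where each u8 in the shadow struct stands for one bit of the hardware descriptor. A hedged sketch of how offset/shift/mask then fall out of offsetof()/sizeof(); the macro names are illustrative, not the driver's:]

#include <stddef.h>

/* One u8 per hardware bit, so offsetof() yields the bit offset and
 * sizeof() the field width in bits. Assumes widths below 32 bits.
 */
#define AMAP_BIT_OFFSET(s, f)	((u32)offsetof(struct s, f))
#define AMAP_BIT_WIDTH(s, f)	((u32)sizeof(((struct s *)0)->f))
#define AMAP_DWORD(s, f)	(AMAP_BIT_OFFSET(s, f) / 32)
#define AMAP_SHIFT(s, f)	(AMAP_BIT_OFFSET(s, f) % 32)
#define AMAP_MASK(s, f)		(((1u << AMAP_BIT_WIDTH(s, f)) - 1) \
					<< AMAP_SHIFT(s, f))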
@ -503,7 +519,8 @@ enum be_if_flags {
BE_IF_FLAGS_VLAN = 0x100,
BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800
BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
BE_IF_FLAGS_MULTICAST = 0x1000
};
/* An RX interface is an object with one or more MAC addresses and
@ -619,7 +636,10 @@ struct be_rxf_stats {
u32 rx_drops_invalid_ring; /* dword 145*/
u32 forwarded_packets; /* dword 146*/
u32 rx_drops_mtu; /* dword 147*/
u32 rsvd0[15];
u32 rsvd0[7];
u32 port0_jabber_events;
u32 port1_jabber_events;
u32 rsvd1[6];
};
struct be_erx_stats {
@ -630,11 +650,16 @@ struct be_erx_stats {
u32 debug_pmem_pbuf_dealloc; /* dword 47*/
};
struct be_pmem_stats {
u32 eth_red_drops;
u32 rsvd[4];
};
struct be_hw_stats {
struct be_rxf_stats rxf;
u32 rsvd[48];
struct be_erx_stats erx;
u32 rsvd1[6];
struct be_pmem_stats pmem;
};
struct be_cmd_req_get_stats {
@ -647,6 +672,20 @@ struct be_cmd_resp_get_stats {
struct be_hw_stats hw_stats;
};
struct be_cmd_req_get_cntl_addnl_attribs {
struct be_cmd_req_hdr hdr;
u8 rsvd[8];
};
struct be_cmd_resp_get_cntl_addnl_attribs {
struct be_cmd_resp_hdr hdr;
u16 ipl_file_number;
u8 ipl_file_version;
u8 rsvd0;
u8 on_die_temperature; /* in degrees centigrade*/
u8 rsvd1[3];
};
struct be_cmd_req_vlan_config {
struct be_cmd_req_hdr hdr;
u8 interface_id;
@ -994,17 +1033,47 @@ struct be_cmd_resp_set_qos {
u32 rsvd;
};
/*********************** Controller Attributes ***********************/
struct be_cmd_req_cntl_attribs {
struct be_cmd_req_hdr hdr;
};
struct be_cmd_resp_cntl_attribs {
struct be_cmd_resp_hdr hdr;
struct mgmt_controller_attrib attribs;
};
/*********************** Set driver function ***********************/
#define CAPABILITY_SW_TIMESTAMPS 2
#define CAPABILITY_BE3_NATIVE_ERX_API 4
struct be_cmd_req_set_func_cap {
struct be_cmd_req_hdr hdr;
u32 valid_cap_flags;
u32 cap_flags;
u8 rsvd[212];
};
struct be_cmd_resp_set_func_cap {
struct be_cmd_resp_hdr hdr;
u32 valid_cap_flags;
u32 cap_flags;
u8 rsvd[212];
};
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_cmd_POST(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
u8 type, bool permanent, u32 if_handle);
extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
u32 if_id, u32 *pmac_id);
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
u32 if_id, u32 *pmac_id, u32 domain);
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
u32 pmac_id, u32 domain);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
u32 en_flags, u8 *mac, bool pmac_invalid,
u32 *if_handle, u32 *pmac_id, u32 domain);
extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
u32 domain);
extern int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay);
extern int be_cmd_cq_create(struct be_adapter *adapter,
@ -1076,4 +1145,7 @@ extern int be_cmd_get_phy_info(struct be_adapter *adapter,
struct be_dma_mem *cmd);
extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
extern void be_detect_dump_ue(struct be_adapter *adapter);
extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
extern int be_cmd_check_native_mode(struct be_adapter *adapter);

View file

@ -1,5 +1,5 @@
/*
* Copyright (C) 2005 - 2010 ServerEngines
* Copyright (C) 2005 - 2011 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -8,11 +8,11 @@
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
* linux-drivers@emulex.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
#include "be.h"
@ -26,7 +26,8 @@ struct be_ethtool_stat {
int offset;
};
enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT,
PMEMSTAT, DRVSTAT};
#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
offsetof(_struct, field)
#define NETSTAT_INFO(field) #field, NETSTAT,\
@ -43,6 +44,11 @@ enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
field)
#define ERXSTAT_INFO(field) #field, ERXSTAT,\
FIELDINFO(struct be_erx_stats, field)
#define PMEMSTAT_INFO(field) #field, PMEMSTAT,\
FIELDINFO(struct be_pmem_stats, field)
#define DRVSTAT_INFO(field) #field, DRVSTAT,\
FIELDINFO(struct be_drv_stats, \
field)
static const struct be_ethtool_stat et_stats[] = {
{NETSTAT_INFO(rx_packets)},
@ -99,7 +105,11 @@ static const struct be_ethtool_stat et_stats[] = {
{MISCSTAT_INFO(rx_drops_too_many_frags)},
{MISCSTAT_INFO(rx_drops_invalid_ring)},
{MISCSTAT_INFO(forwarded_packets)},
{MISCSTAT_INFO(rx_drops_mtu)}
{MISCSTAT_INFO(rx_drops_mtu)},
{MISCSTAT_INFO(port0_jabber_events)},
{MISCSTAT_INFO(port1_jabber_events)},
{PMEMSTAT_INFO(eth_red_drops)},
{DRVSTAT_INFO(be_on_die_temperature)}
};
#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
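[editor note: FIELDINFO() above records each stat's size and offset so one loop can lift values out of whichever base struct the category selects (see the switch further down). A self-contained hedged sketch of the technique, with hypothetical names:]

#include <stddef.h>
#include <string.h>

struct stat_desc { size_t size; size_t offset; };
#define FIELDINFO_SKETCH(type, field) \
	{ sizeof(((type *)0)->field), offsetof(type, field) }

struct rx_stats_sketch { unsigned long long pkts, bytes; };

static const struct stat_desc descs[] = {
	FIELDINFO_SKETCH(struct rx_stats_sketch, pkts),
	FIELDINFO_SKETCH(struct rx_stats_sketch, bytes),
};

/* One generic accessor serves every entry in the table */
static unsigned long long read_stat(const void *base, int i)
{
	unsigned long long v = 0;

	memcpy(&v, (const char *)base + descs[i].offset, descs[i].size);
	return v;
}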
@ -121,7 +131,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
"MAC Loopback test",
"PHY Loopback test",
"External Loopback test",
"DDR DMA test"
"DDR DMA test",
"Link test"
};
@ -276,6 +286,12 @@ be_get_ethtool_stats(struct net_device *netdev,
case MISCSTAT:
p = &hw_stats->rxf;
break;
case PMEMSTAT:
p = &hw_stats->pmem;
break;
case DRVSTAT:
p = &adapter->drv_stats;
break;
}
p = (u8 *)p + et_stats[i].offset;
@ -376,8 +392,9 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size,
&phy_cmd.dma);
phy_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
phy_cmd.size, &phy_cmd.dma,
GFP_KERNEL);
if (!phy_cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
return -ENOMEM;
@ -416,8 +433,8 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
adapter->port_type = ecmd->port;
adapter->transceiver = ecmd->transceiver;
adapter->autoneg = ecmd->autoneg;
pci_free_consistent(adapter->pdev, phy_cmd.size,
phy_cmd.va, phy_cmd.dma);
dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va,
phy_cmd.dma);
} else {
ecmd->speed = adapter->link_speed;
ecmd->port = adapter->port_type;
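[editor note: these ethtool hunks are part of a pci_alloc_consistent() -> dma_alloc_coherent() sweep; the old helper implied GFP_ATOMIC, while the driver now passes GFP_KERNEL explicitly. A hedged sketch of the replacement pattern, assuming cmd is a struct be_dma_mem with .va/.dma/.size as in be.h:]

#include <linux/dma-mapping.h>

cmd.size = sizeof(struct be_cmd_req_get_phy_info);
cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
			    GFP_KERNEL);
if (!cmd.va)
	return -ENOMEM;
/* ... issue the command, read the response ... */
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);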
@ -496,7 +513,7 @@ be_phys_id(struct net_device *netdev, u32 data)
int status;
u32 cur;
be_cmd_get_beacon_state(adapter, adapter->port_num, &cur);
be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
if (cur == BEACON_STATE_ENABLED)
return 0;
@ -504,23 +521,34 @@ be_phys_id(struct net_device *netdev, u32 data)
if (data < 2)
data = 2;
status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
BEACON_STATE_ENABLED);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(data*HZ);
status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
BEACON_STATE_DISABLED);
return status;
}
static bool
be_is_wol_supported(struct be_adapter *adapter)
{
if (!be_physfn(adapter))
return false;
else
return true;
}
static void
be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
wol->supported = WAKE_MAGIC;
if (be_is_wol_supported(adapter))
wol->supported = WAKE_MAGIC;
if (adapter->wol)
wol->wolopts = WAKE_MAGIC;
else
@ -536,7 +564,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
if (wol->wolopts & WAKE_MAGIC)
if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
adapter->wol = true;
else
adapter->wol = false;
@ -554,8 +582,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
};
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
&ddrdma_cmd.dma);
ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
&ddrdma_cmd.dma, GFP_KERNEL);
if (!ddrdma_cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
return -ENOMEM;
@ -569,20 +597,20 @@ be_test_ddr_dma(struct be_adapter *adapter)
}
err:
pci_free_consistent(adapter->pdev, ddrdma_cmd.size,
ddrdma_cmd.va, ddrdma_cmd.dma);
dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
ddrdma_cmd.dma);
return ret;
}
static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
u64 *status)
{
be_cmd_set_loopback(adapter, adapter->port_num,
be_cmd_set_loopback(adapter, adapter->hba_port_num,
loopback_type, 1);
*status = be_cmd_loopback_test(adapter, adapter->port_num,
*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
loopback_type, 1500,
2, 0xabc);
be_cmd_set_loopback(adapter, adapter->port_num,
be_cmd_set_loopback(adapter, adapter->hba_port_num,
BE_NO_LOOPBACK, 1);
return *status;
}
@ -621,7 +649,8 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
&qos_link_speed) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = -1;
} else if (mac_speed) {
} else if (!mac_speed) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = 1;
}
}
@ -662,8 +691,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
&eeprom_cmd.dma);
eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
&eeprom_cmd.dma, GFP_KERNEL);
if (!eeprom_cmd.va) {
dev_err(&adapter->pdev->dev,
@ -677,8 +706,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
}
pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
eeprom_cmd.dma);
dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
eeprom_cmd.dma);
return status;
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (C) 2005 - 2010 ServerEngines
* Copyright (C) 2005 - 2011 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -8,11 +8,11 @@
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
* linux-drivers@emulex.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
/********* Mailbox door bell *************/
@ -44,6 +44,18 @@
#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
#define SLIPORT_STATUS_OFFSET 0x404
#define SLIPORT_CONTROL_OFFSET 0x408
#define SLIPORT_STATUS_ERR_MASK 0x80000000
#define SLIPORT_STATUS_RN_MASK 0x01000000
#define SLIPORT_STATUS_RDY_MASK 0x00800000
#define SLI_PORT_CONTROL_IP_MASK 0x08000000
/********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
@ -289,10 +301,10 @@ struct be_eth_rx_d {
/* RX Compl Queue Descriptor */
/* Pseudo amap definition for eth_rx_compl in which each bit of the
* actual structure is defined as a byte: used to calculate
/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
* each bit of the actual structure is defined as a byte: used to calculate
* offset/shift/mask of each field */
struct amap_eth_rx_compl {
struct amap_eth_rx_compl_v0 {
u8 vlan_tag[16]; /* dword 0 */
u8 pktsize[14]; /* dword 0 */
u8 port; /* dword 0 */
@ -323,10 +335,92 @@ struct amap_eth_rx_compl {
u8 rsshash[32]; /* dword 3 */
} __packed;
/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
* each bit of the actual structure is defined as a byte: used to calculate
* offset/shift/mask of each field */
struct amap_eth_rx_compl_v1 {
u8 vlan_tag[16]; /* dword 0 */
u8 pktsize[14]; /* dword 0 */
u8 vtp; /* dword 0 */
u8 ip_opt; /* dword 0 */
u8 err; /* dword 1 */
u8 rsshp; /* dword 1 */
u8 ipf; /* dword 1 */
u8 tcpf; /* dword 1 */
u8 udpf; /* dword 1 */
u8 ipcksm; /* dword 1 */
u8 l4_cksm; /* dword 1 */
u8 ip_version; /* dword 1 */
u8 macdst[7]; /* dword 1 */
u8 rsvd0; /* dword 1 */
u8 fragndx[10]; /* dword 1 */
u8 ct[2]; /* dword 1 */
u8 sw; /* dword 1 */
u8 numfrags[3]; /* dword 1 */
u8 rss_flush; /* dword 2 */
u8 cast_enc[2]; /* dword 2 */
u8 vtm; /* dword 2 */
u8 rss_bank; /* dword 2 */
u8 port[2]; /* dword 2 */
u8 vntagp; /* dword 2 */
u8 header_len[8]; /* dword 2 */
u8 header_split[2]; /* dword 2 */
u8 rsvd1[13]; /* dword 2 */
u8 valid; /* dword 2 */
u8 rsshash[32]; /* dword 3 */
} __packed;
struct be_eth_rx_compl {
u32 dw[4];
};
struct mgmt_hba_attribs {
u8 flashrom_version_string[32];
u8 manufacturer_name[32];
u32 supported_modes;
u32 rsvd0[3];
u8 ncsi_ver_string[12];
u32 default_extended_timeout;
u8 controller_model_number[32];
u8 controller_description[64];
u8 controller_serial_number[32];
u8 ip_version_string[32];
u8 firmware_version_string[32];
u8 bios_version_string[32];
u8 redboot_version_string[32];
u8 driver_version_string[32];
u8 fw_on_flash_version_string[32];
u32 functionalities_supported;
u16 max_cdblength;
u8 asic_revision;
u8 generational_guid[16];
u8 hba_port_count;
u16 default_link_down_timeout;
u8 iscsi_ver_min_max;
u8 multifunction_device;
u8 cache_valid;
u8 hba_status;
u8 max_domains_supported;
u8 phy_port;
u32 firmware_post_status;
u32 hba_mtu[8];
u32 rsvd1[4];
};
struct mgmt_controller_attrib {
struct mgmt_hba_attribs hba_attribs;
u16 pci_vendor_id;
u16 pci_device_id;
u16 pci_sub_vendor_id;
u16 pci_sub_system_id;
u8 pci_bus_number;
u8 pci_device_number;
u8 pci_function_number;
u8 interface_type;
u64 unique_identifier;
u32 rsvd0[5];
};
struct controller_id {
u32 vendor;
u32 device;

File diff suppressed because it is too large

View file

@ -126,22 +126,22 @@ bnad_free_all_txbufs(struct bnad *bnad,
}
unmap_array[unmap_cons].skb = NULL;
pci_unmap_single(bnad->pcidev,
pci_unmap_addr(&unmap_array[unmap_cons],
dma_unmap_single(&bnad->pcidev->dev,
dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr), skb_headlen(skb),
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
if (++unmap_cons >= unmap_q->q_depth)
break;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
pci_unmap_page(bnad->pcidev,
pci_unmap_addr(&unmap_array[unmap_cons],
dma_unmap_page(&bnad->pcidev->dev,
dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
DMA_TO_DEVICE);
dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
0);
if (++unmap_cons >= unmap_q->q_depth)
break;
@ -199,23 +199,23 @@ bnad_free_txbufs(struct bnad *bnad,
sent_bytes += skb->len;
wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
pci_unmap_single(bnad->pcidev,
pci_unmap_addr(&unmap_array[unmap_cons],
dma_unmap_single(&bnad->pcidev->dev,
dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr), skb_headlen(skb),
PCI_DMA_TODEVICE);
pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
DMA_TO_DEVICE);
dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
prefetch(&unmap_array[unmap_cons + 1]);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
prefetch(&unmap_array[unmap_cons + 1]);
pci_unmap_page(bnad->pcidev,
pci_unmap_addr(&unmap_array[unmap_cons],
dma_unmap_page(&bnad->pcidev->dev,
dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
DMA_TO_DEVICE);
dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
0);
BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
}
@ -340,19 +340,22 @@ static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
struct bnad_unmap_q *unmap_q;
struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb;
int unmap_cons;
unmap_q = rcb->unmap_q;
unmap_array = unmap_q->unmap_array;
for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
skb = unmap_q->unmap_array[unmap_cons].skb;
skb = unmap_array[unmap_cons].skb;
if (!skb)
continue;
unmap_q->unmap_array[unmap_cons].skb = NULL;
pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
unmap_array[unmap_cons],
dma_addr), rcb->rxq->buffer_size,
PCI_DMA_FROMDEVICE);
unmap_array[unmap_cons].skb = NULL;
dma_unmap_single(&bnad->pcidev->dev,
dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
rcb->rxq->buffer_size,
DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
bnad_reset_rcb(bnad, rcb);
@ -391,9 +394,10 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
skb->dev = bnad->netdev;
skb_reserve(skb, NET_IP_ALIGN);
unmap_array[unmap_prod].skb = skb;
dma_addr = pci_map_single(bnad->pcidev, skb->data,
rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
rcb->rxq->buffer_size,
DMA_FROM_DEVICE);
dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@ -434,8 +438,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
struct bna_rcb *rcb = NULL;
unsigned int wi_range, packets = 0, wis = 0;
struct bnad_unmap_q *unmap_q;
struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb;
u32 flags;
u32 flags, unmap_cons;
u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
@ -456,17 +461,17 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
rcb = ccb->rcb[1];
unmap_q = rcb->unmap_q;
unmap_array = unmap_q->unmap_array;
unmap_cons = unmap_q->consumer_index;
skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
skb = unmap_array[unmap_cons].skb;
BUG_ON(!(skb));
unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
pci_unmap_single(bnad->pcidev,
pci_unmap_addr(&unmap_q->
unmap_array[unmap_q->
consumer_index],
unmap_array[unmap_cons].skb = NULL;
dma_unmap_single(&bnad->pcidev->dev,
dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
rcb->rxq->buffer_size,
PCI_DMA_FROMDEVICE);
rcb->rxq->buffer_size,
DMA_FROM_DEVICE);
BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
/* Should be more efficient ? Performance ? */
@ -1015,9 +1020,9 @@ bnad_mem_free(struct bnad *bnad,
if (mem_info->mem_type == BNA_MEM_T_DMA) {
BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
dma_pa);
pci_free_consistent(bnad->pcidev,
mem_info->mdl[i].len,
mem_info->mdl[i].kva, dma_pa);
dma_free_coherent(&bnad->pcidev->dev,
mem_info->mdl[i].len,
mem_info->mdl[i].kva, dma_pa);
} else
kfree(mem_info->mdl[i].kva);
}
@ -1047,8 +1052,9 @@ bnad_mem_alloc(struct bnad *bnad,
for (i = 0; i < mem_info->num; i++) {
mem_info->mdl[i].len = mem_info->len;
mem_info->mdl[i].kva =
pci_alloc_consistent(bnad->pcidev,
mem_info->len, &dma_pa);
dma_alloc_coherent(&bnad->pcidev->dev,
mem_info->len, &dma_pa,
GFP_KERNEL);
if (mem_info->mdl[i].kva == NULL)
goto err_return;
@ -2600,9 +2606,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
unmap_q->unmap_array[unmap_prod].skb = skb;
BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
txqent->vector[vect_id].length = htons(skb_headlen(skb));
dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
PCI_DMA_TODEVICE);
pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@ -2630,11 +2636,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
txqent->vector[vect_id].length = htons(size);
dma_addr =
pci_map_page(bnad->pcidev, frag->page,
frag->page_offset, size,
PCI_DMA_TODEVICE);
pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
frag->page_offset, size, DMA_TO_DEVICE);
dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@ -3022,14 +3026,14 @@ bnad_pci_init(struct bnad *bnad,
err = pci_request_regions(pdev, BNAD_NAME);
if (err)
goto disable_device;
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
!dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
*using_dac = 1;
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
err = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(32));
if (err)
goto release_regions;
}
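[editor note: the hunk above is the standard 64-bit-with-32-bit-fallback DMA mask setup, rewritten from the pci_set_* wrappers to the generic DMA API. A minimal standalone sketch of the pattern; the function name is illustrative:]

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int set_dma_masks(struct pci_dev *pdev, int *using_dac)
{
	/* Prefer 64-bit streaming and coherent masks */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = 1;
		return 0;
	}
	/* Fall back to 32-bit if the platform refuses */
	*using_dac = 0;
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;
	return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}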

View file

@ -181,7 +181,7 @@ struct bnad_rx_info {
/* Unmap queues for Tx / Rx cleanup */
struct bnad_skb_unmap {
struct sk_buff *skb;
DECLARE_PCI_UNMAP_ADDR(dma_addr)
DEFINE_DMA_UNMAP_ADDR(dma_addr);
};
struct bnad_unmap_q {

View file

@ -1,6 +1,6 @@
/* bnx2.c: Broadcom NX2 network driver.
*
* Copyright (c) 2004-2010 Broadcom Corporation
* Copyright (c) 2004-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -56,11 +56,11 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
#define DRV_MODULE_VERSION "2.0.21"
#define DRV_MODULE_RELDATE "Dec 23, 2010"
#define DRV_MODULE_VERSION "2.1.6"
#define DRV_MODULE_RELDATE "Mar 7, 2011"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw"
#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
@ -435,7 +435,8 @@ bnx2_cnic_stop(struct bnx2 *bp)
struct cnic_ctl_info info;
mutex_lock(&bp->cnic_lock);
c_ops = bp->cnic_ops;
c_ops = rcu_dereference_protected(bp->cnic_ops,
lockdep_is_held(&bp->cnic_lock));
if (c_ops) {
info.cmd = CNIC_CTL_STOP_CMD;
c_ops->cnic_ctl(bp->cnic_data, &info);
@ -450,7 +451,8 @@ bnx2_cnic_start(struct bnx2 *bp)
struct cnic_ctl_info info;
mutex_lock(&bp->cnic_lock);
c_ops = bp->cnic_ops;
c_ops = rcu_dereference_protected(bp->cnic_ops,
lockdep_is_held(&bp->cnic_lock));
if (c_ops) {
if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@ -8315,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
#endif
};
static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
static void inline vlan_features_add(struct net_device *dev, u32 flags)
{
dev->vlan_features |= flags;
}
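[editor note: cnic_ops is now annotated __rcu (see the bnx2.h hunk below), and lock-holding readers fetch it with rcu_dereference_protected(), naming the lock as proof for lockdep. A condensed sketch of the access pattern, with the structs reduced to the relevant members:]

#include <linux/rcupdate.h>
#include <linux/mutex.h>

struct cnic_ops;	/* opaque here; defined in cnic_if.h */

struct bnx2_sketch {
	struct mutex cnic_lock;
	struct cnic_ops __rcu *cnic_ops;
};

static struct cnic_ops *cnic_ops_locked(struct bnx2_sketch *bp)
{
	/* The lockdep expression documents and checks why a plain
	 * rcu_dereference() is unnecessary: we hold the update lock.
	 */
	return rcu_dereference_protected(bp->cnic_ops,
					 lockdep_is_held(&bp->cnic_lock));
}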

View file

@ -1,6 +1,6 @@
/* bnx2.h: Broadcom NX2 network driver.
*
* Copyright (c) 2004-2009 Broadcom Corporation
* Copyright (c) 2004-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -6207,6 +6207,8 @@ struct l2_fhdr {
#define BNX2_CP_SCRATCH 0x001a0000
#define BNX2_FW_MAX_ISCSI_CONN 0x001a0080
/*
* mcp_reg definition
@ -6759,7 +6761,7 @@ struct bnx2 {
u32 tx_wake_thresh;
#ifdef BCM_CNIC
struct cnic_ops *cnic_ops;
struct cnic_ops __rcu *cnic_ops;
void *cnic_data;
#endif

View file

@ -22,8 +22,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
#define DRV_MODULE_VERSION "1.62.00-6"
#define DRV_MODULE_RELDATE "2011/01/30"
#define DRV_MODULE_VERSION "1.62.11-0"
#define DRV_MODULE_RELDATE "2011/01/31"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE
@ -31,7 +31,7 @@
#define BNX2X_NEW_NAPI
#if defined(CONFIG_DCB)
#define BCM_DCB
#define BCM_DCBNL
#endif
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
@ -129,6 +129,7 @@ void bnx2x_panic_dump(struct bnx2x *bp);
#endif
#define bnx2x_mc_addr(ha) ((ha)->addr)
#define bnx2x_uc_addr(ha) ((ha)->addr)
#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
#define U64_HI(x) (u32)(((u64)(x)) >> 32)
@ -341,6 +342,8 @@ struct bnx2x_fastpath {
/* chip independent shortcut into rx_prods_offset memory */
u32 ustorm_rx_prods_offset;
u32 rx_buf_size;
dma_addr_t status_blk_mapping;
struct sw_tx_bd *tx_buf_ring;
@ -428,6 +431,10 @@ struct bnx2x_fastpath {
};
#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
/* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
#ifdef BCM_CNIC
/* FCoE L2 `fastpath' is right after the eth entries */
#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
@ -810,6 +817,7 @@ struct bnx2x_slowpath {
struct eth_stats_query fw_stats;
struct mac_configuration_cmd mac_config;
struct mac_configuration_cmd mcast_config;
struct mac_configuration_cmd uc_mac_config;
struct client_init_ramrod_data client_init_data;
/* used by dmae command executer */
@ -911,7 +919,6 @@ struct bnx2x {
int tx_ring_size;
u32 rx_csum;
u32 rx_buf_size;
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
#define ETH_MIN_PACKET_SIZE 60
@ -939,7 +946,7 @@ struct bnx2x {
struct eth_spe *spq_prod_bd;
struct eth_spe *spq_last_bd;
__le16 *dsb_sp_prod;
atomic_t spq_left; /* serialize spq */
atomic_t cq_spq_left; /* ETH_XXX ramrods credit */
/* used to synchronize spq accesses */
spinlock_t spq_lock;
@ -949,6 +956,7 @@ struct bnx2x {
u16 eq_prod;
u16 eq_cons;
__le16 *eq_cons_sb;
atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
/* Flags for marking that there is a STAT_QUERY or
SET_MAC ramrod pending */
@ -976,8 +984,12 @@ struct bnx2x {
#define MF_FUNC_DIS 0x1000
#define FCOE_MACS_SET 0x2000
#define NO_FCOE_FLAG 0x4000
#define NO_ISCSI_OOO_FLAG 0x8000
#define NO_ISCSI_FLAG 0x10000
#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
int pf_num; /* absolute PF number */
int pfid; /* per-path PF number */
@ -1064,6 +1076,7 @@ struct bnx2x {
int num_queues;
int disable_tpa;
int int_mode;
u32 *rx_indir_table;
struct tstorm_eth_mac_filter_config mac_filters;
#define BNX2X_ACCEPT_NONE 0x0000
@ -1110,7 +1123,7 @@ struct bnx2x {
#define BNX2X_CNIC_FLAG_MAC_SET 1
void *t2;
dma_addr_t t2_mapping;
struct cnic_ops *cnic_ops;
struct cnic_ops __rcu *cnic_ops;
void *cnic_data;
u32 cnic_tag;
struct cnic_eth_dev cnic_eth_dev;
@ -1125,13 +1138,12 @@ struct bnx2x {
u16 cnic_kwq_pending;
u16 cnic_spq_pending;
struct mutex cnic_mutex;
u8 iscsi_mac[ETH_ALEN];
u8 fip_mac[ETH_ALEN];
#endif
int dmae_ready;
/* used to synchronize dmae accesses */
struct mutex dmae_mutex;
spinlock_t dmae_lock;
/* used to protect the FW mail box */
struct mutex fw_mb_mutex;
@ -1448,6 +1460,12 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
u32 data_hi, u32 data_lo, int common);
/* Clears multicast and unicast list configuration in the chip. */
void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
void bnx2x_invalidate_uc_list(struct bnx2x *bp);
void bnx2x_update_coalesce(struct bnx2x *bp);
int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
@ -1787,5 +1805,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
void bnx2x_push_indir_table(struct bnx2x *bp);
#endif /* bnx2x.h */


@ -232,7 +232,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
/* move empty skb from pool to prod and map it */
prod_rx_buf->skb = fp->tpa_pool[queue].skb;
mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
bp->rx_buf_size, DMA_FROM_DEVICE);
fp->rx_buf_size, DMA_FROM_DEVICE);
dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
/* move partial skb from cons to pool (don't unmap yet) */
@ -367,13 +367,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
struct sk_buff *skb = rx_buf->skb;
/* alloc new skb */
struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_size, DMA_FROM_DEVICE);
fp->rx_buf_size, DMA_FROM_DEVICE);
if (likely(new_skb)) {
/* fix ip xsum and give it to the stack */
@ -385,10 +385,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
prefetch(((char *)(skb)) + L1_CACHE_BYTES);
#ifdef BNX2X_STOP_ON_ERROR
if (pad + len > bp->rx_buf_size) {
if (pad + len > fp->rx_buf_size) {
BNX2X_ERR("skb_put is about to fail... "
"pad %d len %d rx_buf_size %d\n",
pad, len, bp->rx_buf_size);
pad, len, fp->rx_buf_size);
bnx2x_panic();
return;
}
@ -618,7 +618,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_size,
fp->rx_buf_size,
DMA_FROM_DEVICE);
skb_reserve(skb, pad);
skb_put(skb, len);
@ -858,19 +858,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
u16 ring_prod;
int i, j;
bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
IP_HEADER_ALIGNMENT_PADDING;
DP(NETIF_MSG_IFUP,
"mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
DP(NETIF_MSG_IFUP,
"mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
if (!fp->disable_tpa) {
for (i = 0; i < max_agg_queues; i++) {
fp->tpa_pool[i].skb =
netdev_alloc_skb(bp->dev, bp->rx_buf_size);
netdev_alloc_skb(bp->dev, fp->rx_buf_size);
if (!fp->tpa_pool[i].skb) {
BNX2X_ERR("Failed to allocate TPA "
"skb pool for queue[%d] - "
@ -978,7 +975,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_size, DMA_FROM_DEVICE);
fp->rx_buf_size, DMA_FROM_DEVICE);
rx_buf->skb = NULL;
dev_kfree_skb(skb);
@ -1303,6 +1300,31 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
return rc;
}
static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
int i;
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
/* Always use a mini-jumbo MTU for the FCoE L2 ring */
if (IS_FCOE_IDX(i))
/*
* Although no IP frames are expected to arrive on
* this ring, we still want to add an
* IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
* overrun attack.
*/
fp->rx_buf_size =
BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
else
fp->rx_buf_size =
bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
IP_HEADER_ALIGNMENT_PADDING;
}
}
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
@ -1326,6 +1348,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* must be called before memory allocation and HW init */
bnx2x_ilt_set_info(bp);
/* Set the receive queues buffer size */
bnx2x_set_rx_buf_size(bp);
if (bnx2x_alloc_mem(bp))
return -ENOMEM;
@ -1481,6 +1506,15 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_set_eth_mac(bp, 1);
/* Clear MC configuration */
if (CHIP_IS_E1(bp))
bnx2x_invalidate_e1_mc_list(bp);
else
bnx2x_invalidate_e1h_mc_list(bp);
/* Clear UC lists configuration */
bnx2x_invalidate_uc_list(bp);
if (bp->pending_max) {
bnx2x_update_max_mf_config(bp, bp->pending_max);
bp->pending_max = 0;
@ -1489,25 +1523,23 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->port.pmf)
bnx2x_initial_phy_init(bp, load_mode);
/* Initialize Rx filtering */
bnx2x_set_rx_mode(bp->dev);
/* Start fast path */
switch (load_mode) {
case LOAD_NORMAL:
/* Tx queue should be only reenabled */
netif_tx_wake_all_queues(bp->dev);
/* Initialize the receive filter. */
bnx2x_set_rx_mode(bp->dev);
break;
case LOAD_OPEN:
netif_tx_start_all_queues(bp->dev);
smp_mb__after_clear_bit();
/* Initialize the receive filter. */
bnx2x_set_rx_mode(bp->dev);
break;
case LOAD_DIAG:
/* Initialize the receive filter. */
bnx2x_set_rx_mode(bp->dev);
bp->state = BNX2X_STATE_DIAG;
break;


@ -831,11 +831,11 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
dma_addr_t mapping;
skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
if (unlikely(skb == NULL))
return -ENOMEM;
mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
dev_kfree_skb(skb);
@ -901,7 +901,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
if (fp->tpa_state[i] == BNX2X_TPA_START)
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_size, DMA_FROM_DEVICE);
fp->rx_buf_size, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
rx_buf->skb = NULL;


@ -19,6 +19,9 @@
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/errno.h>
#ifdef BCM_DCBNL
#include <linux/dcbnl.h>
#endif
#include "bnx2x.h"
#include "bnx2x_cmn.h"
@ -508,13 +511,75 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
return 0;
}
#ifdef BCM_DCBNL
static inline
u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
{
u8 pri;
/* Choose the highest priority */
for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
if (ent->pri_bitmap & (1 << pri))
break;
return pri;
}
static inline
u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
{
return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
DCB_APP_IDTYPE_ETHTYPE;
}
static inline
void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp)
{
int i;
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &=
~DCBX_APP_ENTRY_VALID;
}
int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
{
int i, err = 0;
for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
struct dcbx_app_priority_entry *ent =
&bp->dcbx_local_feat.app.app_pri_tbl[i];
if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
/* avoid invalid user-priority */
if (up) {
struct dcb_app app;
app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
app.protocol = ent->app_id;
app.priority = delall ? 0 : up;
err = dcb_setapp(bp->dev, &app);
}
}
}
return err;
}
#endif
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
{
switch (state) {
case BNX2X_DCBX_STATE_NEG_RECEIVED:
{
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
#ifdef BCM_DCBNL
/**
* Delete app tlvs from dcbnl before reading new
* negotiation results
*/
bnx2x_dcbnl_update_applist(bp, true);
#endif
/* Read neg results if dcbx is in the FW */
if (bnx2x_dcbx_read_shmem_neg_results(bp))
return;
@ -526,10 +591,24 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
bp->dcbx_error);
if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
#ifdef BCM_DCBNL
/**
* Add new app tlvs to dcbnl
*/
bnx2x_dcbnl_update_applist(bp, false);
#endif
bnx2x_dcbx_stop_hw_tx(bp);
return;
}
/* fall through */
#ifdef BCM_DCBNL
/**
* Invalidate the local app tlvs if they are not added
* to the dcbnl app list to avoid deleting them from
* the list later on
*/
bnx2x_dcbx_invalidate_local_apps(bp);
#endif
}
case BNX2X_DCBX_STATE_TX_PAUSED:
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
@ -1505,8 +1584,7 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
}
/* DCB netlink */
#ifdef BCM_DCB
#include <linux/dcbnl.h>
#ifdef BCM_DCBNL
#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \
DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
@ -1816,32 +1894,6 @@ static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
}
static bool bnx2x_app_is_equal(struct dcbx_app_priority_entry *app_ent,
u8 idtype, u16 idval)
{
if (!(app_ent->appBitfield & DCBX_APP_ENTRY_VALID))
return false;
switch (idtype) {
case DCB_APP_IDTYPE_ETHTYPE:
if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
DCBX_APP_SF_ETH_TYPE)
return false;
break;
case DCB_APP_IDTYPE_PORTNUM:
if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
DCBX_APP_SF_PORT)
return false;
break;
default:
return false;
}
if (app_ent->app_id != idval)
return false;
return true;
}
static void bnx2x_admin_app_set_ent(
struct bnx2x_admin_priority_app_table *app_ent,
u8 idtype, u16 idval, u8 up)
@ -1943,30 +1995,6 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
return bnx2x_set_admin_app_up(bp, idtype, idval, up);
}
static u8 bnx2x_dcbnl_get_app_up(struct net_device *netdev, u8 idtype,
u16 idval)
{
int i;
u8 up = 0;
struct bnx2x *bp = netdev_priv(netdev);
DP(NETIF_MSG_LINK, "app_type %d, app_id 0x%x\n", idtype, idval);
/* iterate over the app entries looking for idtype and idval */
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
if (bnx2x_app_is_equal(&bp->dcbx_local_feat.app.app_pri_tbl[i],
idtype, idval))
break;
if (i < DCBX_MAX_APP_PROTOCOL)
/* if found return up */
up = bp->dcbx_local_feat.app.app_pri_tbl[i].pri_bitmap;
else
DP(NETIF_MSG_LINK, "app not found\n");
return up;
}
static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
{
struct bnx2x *bp = netdev_priv(netdev);
@ -2107,7 +2135,6 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
.setnumtcs = bnx2x_dcbnl_set_numtcs,
.getpfcstate = bnx2x_dcbnl_get_pfc_state,
.setpfcstate = bnx2x_dcbnl_set_pfc_state,
.getapp = bnx2x_dcbnl_get_app_up,
.setapp = bnx2x_dcbnl_set_app_up,
.getdcbx = bnx2x_dcbnl_get_dcbx,
.setdcbx = bnx2x_dcbnl_set_dcbx,
@ -2115,4 +2142,4 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
.setfeatcfg = bnx2x_dcbnl_set_featcfg,
};
#endif /* BCM_DCB */
#endif /* BCM_DCBNL */


@ -189,8 +189,9 @@ enum {
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
/* DCB netlink */
#ifdef BCM_DCB
#ifdef BCM_DCBNL
extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
#endif /* BCM_DCB */
int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
#endif /* BCM_DCBNL */
#endif /* BNX2X_DCB_H */


@ -1617,7 +1617,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
/* prepare the loopback packet */
pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
if (!skb) {
rc = -ENOMEM;
goto test_loopback_exit;
@ -2131,6 +2131,59 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
return 0;
}
static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
void *rules __always_unused)
{
struct bnx2x *bp = netdev_priv(dev);
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
info->data = BNX2X_NUM_ETH_QUEUES(bp);
return 0;
default:
return -EOPNOTSUPP;
}
}
static int bnx2x_get_rxfh_indir(struct net_device *dev,
struct ethtool_rxfh_indir *indir)
{
struct bnx2x *bp = netdev_priv(dev);
size_t copy_size =
min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
return -EOPNOTSUPP;
indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
memcpy(indir->ring_index, bp->rx_indir_table,
copy_size * sizeof(bp->rx_indir_table[0]));
return 0;
}
static int bnx2x_set_rxfh_indir(struct net_device *dev,
const struct ethtool_rxfh_indir *indir)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
return -EOPNOTSUPP;
/* Validate size and indices */
if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
return -EINVAL;
for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
return -EINVAL;
memcpy(bp->rx_indir_table, indir->ring_index,
indir->size * sizeof(bp->rx_indir_table[0]));
bnx2x_push_indir_table(bp);
return 0;
}
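The mechanism behind these two handlers, as a small sketch (an assumption for illustration, not driver code): the NIC hashes each flow and uses the low bits of the hash to index the indirection table, whose entry selects the RX ring, so retargeting traffic only means rewriting table entries.

#include <linux/types.h>

/* map a packet's RX hash to a receive ring via the indirection table */
static unsigned int rss_pick_ring(const u8 *indir_table,
                                  unsigned int table_size, u32 rx_hash)
{
        return indir_table[rx_hash % table_size];
}

Userspace reaches these hooks through the ETHTOOL_GRXFHINDIR/ETHTOOL_SRXFHINDIR ioctls (the -x/-X options of sufficiently recent ethtool binaries).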
static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_settings = bnx2x_get_settings,
.set_settings = bnx2x_set_settings,
@ -2167,6 +2220,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_strings = bnx2x_get_strings,
.phys_id = bnx2x_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
.get_rxfh_indir = bnx2x_get_rxfh_indir,
.set_rxfh_indir = bnx2x_set_rxfh_indir,
};
void bnx2x_set_ethtool_ops(struct net_device *netdev)


@ -11,21 +11,28 @@
#include "bnx2x_fw_defs.h"
#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
struct license_key {
u32 reserved[6];
#if defined(__BIG_ENDIAN)
u16 max_iscsi_init_conn;
u16 max_iscsi_trgt_conn;
#elif defined(__LITTLE_ENDIAN)
u16 max_iscsi_trgt_conn;
u16 max_iscsi_init_conn;
#endif
u32 max_iscsi_conn;
#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF
#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT 0
#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000
#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT 16
u32 reserved_a[6];
u32 reserved_a;
u32 max_fcoe_conn;
#define BNX2X_MAX_FCOE_TRGT_CONN_MASK 0xFFFF
#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0
#define BNX2X_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000
#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16
u32 reserved_b[4];
};
#define PORT_0 0
#define PORT_1 1
#define PORT_MAX 2
@ -237,8 +244,26 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
u32 Reserved0[16]; /* 0x158 */
u32 Reserved0[3]; /* 0x158 */
/* Controls the TX laser of the SFP+ module */
u32 sfp_ctrl; /* 0x164 */
#define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
#define PORT_HW_CFG_TX_LASER_SHIFT 0
#define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
#define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
#define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
#define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
#define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
/* Controls the fault module LED of the SFP+ */
#define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
u32 Reserved01[12]; /* 0x158 */
/* for external PHY, or forced mode or during AN */
u16 xgxs_config_rx[4]; /* 0x198 */
@ -246,12 +271,78 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
u32 Reserved1[56]; /* 0x1A8 */
u32 default_cfg; /* 0x288 */
#define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
#define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
#define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
#define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
#define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
#define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
#define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
#define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
#define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
#define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
#define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
#define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
#define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
#define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
#define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
#define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
#define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
#define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
#define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
#define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
/*
* When the KR link must be forced although the link partner is not
* KR-compliant, this parameter determines the trigger for it. When
* GPIO is selected, a low input will force the speed. The current
* default speed is 1G. In the future this may be widened to select
* the forced speed with another parameter. Note that when force-1G
* is enabled, it overrides option 56: Link Speed option.
*/
#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
/* Determines which GPIO is used to reset the external PHY */
#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
/* Enable BAM on KR */
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
/* Enable Common Mode Sense */
#define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
#define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
u32 speed_capability_mask2; /* 0x28C */
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
@ -381,6 +472,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00

File diff suppressed because it is too large.


@ -1,4 +1,4 @@
/* Copyright 2008-2010 Broadcom Corporation
/* Copyright 2008-2011 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@ -33,7 +33,7 @@
#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
#define SPEED_AUTO_NEG 0
#define SPEED_AUTO_NEG 0
#define SPEED_12000 12000
#define SPEED_12500 12500
#define SPEED_13000 13000
@ -44,8 +44,8 @@
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
#define SFP_EEPROM_VENDOR_OUI_SIZE 3
#define SFP_EEPROM_PART_NO_ADDR 0x28
#define SFP_EEPROM_PART_NO_SIZE 16
#define SFP_EEPROM_PART_NO_ADDR 0x28
#define SFP_EEPROM_PART_NO_SIZE 16
#define PWR_FLT_ERR_MSG_LEN 250
#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@ -62,7 +62,7 @@
#define SINGLE_MEDIA(params) (params->num_phys == 2)
/* Dual Media board contains two external phy with different media */
#define DUAL_MEDIA(params) (params->num_phys == 3)
#define FW_PARAM_MDIO_CTRL_OFFSET 16
#define FW_PARAM_MDIO_CTRL_OFFSET 16
#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
@ -201,12 +201,14 @@ struct link_params {
/* Default / User Configuration */
u8 loopback_mode;
#define LOOPBACK_NONE 0
#define LOOPBACK_EMAC 1
#define LOOPBACK_BMAC 2
#define LOOPBACK_NONE 0
#define LOOPBACK_EMAC 1
#define LOOPBACK_BMAC 2
#define LOOPBACK_XGXS 3
#define LOOPBACK_EXT_PHY 4
#define LOOPBACK_EXT 5
#define LOOPBACK_EXT 5
#define LOOPBACK_UMAC 6
#define LOOPBACK_XMAC 7
/* Device parameters */
u8 mac_addr[6];
@ -230,10 +232,11 @@ struct link_params {
/* Phy register parameter */
u32 chip_id;
/* features */
u32 feature_config_flags;
#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
/* Will be populated during common init */
struct bnx2x_phy phy[MAX_PHYS];
@ -334,6 +337,11 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
/* Reset the external of SFX7101 */
void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf);
void bnx2x_hw_reset_phy(struct link_params *params);
/* Checks if HW lock is required for this phy/board type */
@ -379,7 +387,7 @@ void bnx2x_ets_disabled(struct link_params *params);
/* Used to configure the ETS to BW limited */
void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
const u32 cos1_bw);
const u32 cos1_bw);
/* Used to configure the ETS to strict */
u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);


@ -586,7 +586,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
/* lock the dmae channel */
mutex_lock(&bp->dmae_mutex);
spin_lock_bh(&bp->dmae_lock);
/* reset completion */
*wb_comp = 0;
@ -617,7 +617,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
unlock:
mutex_unlock(&bp->dmae_mutex);
spin_unlock_bh(&bp->dmae_lock);
return rc;
}
@ -1397,7 +1397,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
}
smp_mb__before_atomic_inc();
atomic_inc(&bp->spq_left);
atomic_inc(&bp->cq_spq_left);
/* push the change in fp->state and towards the memory */
smp_wmb();
@ -2484,8 +2484,14 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
rxq_init->sge_map = fp->rx_sge_mapping;
rxq_init->rcq_map = fp->rx_comp_mapping;
rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
rxq_init->mtu = bp->dev->mtu;
rxq_init->buf_sz = bp->rx_buf_size;
/* Always use mini-jumbo MTU for FCoE L2 ring */
if (IS_FCOE_FP(fp))
rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
else
rxq_init->mtu = bp->dev->mtu;
rxq_init->buf_sz = fp->rx_buf_size;
rxq_init->cl_qzone_id = fp->cl_qzone_id;
rxq_init->cl_id = fp->cl_id;
rxq_init->spcl_id = fp->cl_id;
@ -2737,11 +2743,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
spin_lock_bh(&bp->spq_lock);
if (!atomic_read(&bp->spq_left)) {
BNX2X_ERR("BUG! SPQ ring full!\n");
spin_unlock_bh(&bp->spq_lock);
bnx2x_panic();
return -EBUSY;
if (common) {
if (!atomic_read(&bp->eq_spq_left)) {
BNX2X_ERR("BUG! EQ ring full!\n");
spin_unlock_bh(&bp->spq_lock);
bnx2x_panic();
return -EBUSY;
}
} else if (!atomic_read(&bp->cq_spq_left)) {
BNX2X_ERR("BUG! SPQ ring full!\n");
spin_unlock_bh(&bp->spq_lock);
bnx2x_panic();
return -EBUSY;
}
spe = bnx2x_sp_get_next(bp);
@ -2772,20 +2785,26 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
/* stats ramrod has it's own slot on the spq */
if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
/* It's ok if the actual decrement is issued towards the memory
* somewhere between the spin_lock and spin_unlock. Thus no
* further explicit memory barrier is needed.
*/
atomic_dec(&bp->spq_left);
if (common)
atomic_dec(&bp->eq_spq_left);
else
atomic_dec(&bp->cq_spq_left);
}
DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
"SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
"type(0x%x) left %x\n",
"type(0x%x) left (ETH, COMMON) (%x,%x)\n",
bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
(u32)(U64_LO(bp->spq_mapping) +
(void *)bp->spq_prod_bd - (void *)bp->spq), command,
HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
HW_CID(bp, cid), data_hi, data_lo, type,
atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
bnx2x_sp_prod_update(bp);
spin_unlock_bh(&bp->spq_lock);
@ -3697,8 +3716,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
sw_cons = bp->eq_cons;
sw_prod = bp->eq_prod;
DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
hw_cons, sw_cons, atomic_read(&bp->spq_left));
DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
for (; sw_cons != hw_cons;
sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
@ -3763,13 +3782,15 @@ static void bnx2x_eq_int(struct bnx2x *bp)
case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
bp->set_mac_pending = 0;
if (elem->message.data.set_mac_event.echo)
bp->set_mac_pending = 0;
break;
case (EVENT_RING_OPCODE_SET_MAC |
BNX2X_STATE_CLOSING_WAIT4_HALT):
DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
bp->set_mac_pending = 0;
if (elem->message.data.set_mac_event.echo)
bp->set_mac_pending = 0;
break;
default:
/* unknown event log error and continue */
@ -3781,7 +3802,7 @@ next_spqe:
} /* for */
smp_mb__before_atomic_inc();
atomic_add(spqe_cnt, &bp->spq_left);
atomic_add(spqe_cnt, &bp->eq_spq_left);
bp->eq_cons = sw_cons;
bp->eq_prod = sw_prod;
@ -4208,13 +4229,13 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
for_each_eth_queue(bp, i)
bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
bp->rx_ticks, bp->tx_ticks);
bp->tx_ticks, bp->rx_ticks);
}
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
spin_lock_init(&bp->spq_lock);
atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
bp->spq_prod_idx = 0;
bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
@ -4239,9 +4260,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
bp->eq_cons = 0;
bp->eq_prod = NUM_EQ_DESC;
bp->eq_cons_sb = BNX2X_EQ_INDEX;
/* we want a warning message before it gets rough... */
atomic_set(&bp->eq_spq_left,
min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}
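A condensed sketch of the credit discipline introduced here (assumed helper, not driver code, assuming a current tree's <linux/atomic.h>): ETH ramrods draw from cq_spq_left and are repaid by fastpath CQ completions, while COMMON ramrods draw from eq_spq_left and are repaid as the event queue is drained, so one class can no longer exhaust the other's slots. Callers serialize under spq_lock, as bnx2x_sp_post does.

#include <linux/atomic.h>
#include <linux/errno.h>

/* take one slot from a credit pool; caller retries on -EBUSY */
static int spq_take_credit(atomic_t *credit)
{
        if (!atomic_read(credit))
                return -EBUSY;          /* ring full for this class */
        atomic_dec(credit);
        return 0;
}

/* the matching completion handler repays it: atomic_add(n, credit); */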
static void bnx2x_init_ind_table(struct bnx2x *bp)
void bnx2x_push_indir_table(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
int i;
@ -4249,13 +4273,20 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
return;
DP(NETIF_MSG_IFUP,
"Initializing indirection table multi_mode %d\n", bp->multi_mode);
for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
REG_WR8(bp, BAR_TSTRORM_INTMEM +
TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
bp->fp->cl_id + (i % (bp->num_queues -
NONE_ETH_CONTEXT_USE)));
bp->fp->cl_id + bp->rx_indir_table[i]);
}
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
int i;
for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
bnx2x_push_indir_table(bp);
}
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
@ -5851,7 +5882,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
BP_ABS_FUNC(bp), load_code);
bp->dmae_ready = 0;
mutex_init(&bp->dmae_mutex);
spin_lock_init(&bp->dmae_lock);
rc = bnx2x_gunzip_init(bp);
if (rc)
return rc;
@ -6003,6 +6034,8 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
BNX2X_FREE(bp->rx_indir_table);
#undef BNX2X_PCI_FREE
#undef BNX2X_KFREE
}
@ -6133,6 +6166,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
/* EQ */
BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
TSTORM_INDIRECTION_TABLE_SIZE);
return 0;
alloc_mem_err:
@ -6186,12 +6222,14 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
int ramrod_flags = WAIT_RAMROD_COMMON;
bp->set_mac_pending = 1;
smp_wmb();
config->hdr.length = 1;
config->hdr.offset = cam_offset;
config->hdr.client_id = 0xff;
config->hdr.reserved1 = 0;
/* Mark the single MAC configuration ramrod as opposed to a
* UC/MC list configuration).
*/
config->hdr.echo = 1;
/* primary MAC */
config->config_table[0].msb_mac_addr =
@ -6223,6 +6261,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
config->config_table[0].middle_mac_addr,
config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
mb();
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(bnx2x_sp_mapping(bp, mac_config)),
U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
@ -6287,20 +6327,15 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
if (CHIP_IS_E1H(bp))
return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
else if (CHIP_MODE_IS_4_PORT(bp))
return BP_FUNC(bp) * 32 + rel_offset;
return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
else
return BP_VN(bp) * 32 + rel_offset;
return E2_FUNC_MAX * rel_offset + BP_VN(bp);
}
/**
* LLH CAM line allocations: currently only iSCSI and ETH macs are
* relevant. In addition, the current implementation is tuned for a
* single ETH MAC.
*
* When multiple unicast ETH MACs PF configuration in switch
* independent mode is required (NetQ, multiple netdev MACs,
* etc.), consider better utilisation of 16 per function MAC
* entries in the LLH memory.
*/
enum {
LLH_CAM_ISCSI_ETH_LINE = 0,
@ -6375,14 +6410,37 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
}
}
static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
{
return CHIP_REV_IS_SLOW(bp) ?
(BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
(BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
}
/* set mc list, do not wait as wait implies sleep and
* set_rx_mode can be invoked from non-sleepable context.
*
* Instead we use the same ramrod data buffer each time we need
* to configure a list of addresses, and use the fact that the
* list of MACs is changed in an incremental way and that the
* function is called under the netif_addr_lock. A temporary
* inconsistent CAM configuration (possible in case of a very fast
* sequence of add/del/add on the host side) will shortly be
* restored by the handler of the last ramrod.
*/
static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
{
int i = 0, old;
struct net_device *dev = bp->dev;
u8 offset = bnx2x_e1_cam_mc_offset(bp);
struct netdev_hw_addr *ha;
struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
return -EINVAL;
netdev_for_each_mc_addr(ha, dev) {
/* copy mac */
config_cmd->config_table[i].msb_mac_addr =
@ -6423,32 +6481,47 @@ static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
}
}
wmb();
config_cmd->hdr.length = i;
config_cmd->hdr.offset = offset;
config_cmd->hdr.client_id = 0xff;
config_cmd->hdr.reserved1 = 0;
/* Mark that this ramrod doesn't use bp->set_mac_pending for
* synchronization.
*/
config_cmd->hdr.echo = 0;
bp->set_mac_pending = 1;
smp_wmb();
mb();
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
{
int i;
struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
int ramrod_flags = WAIT_RAMROD_COMMON;
u8 offset = bnx2x_e1_cam_mc_offset(bp);
bp->set_mac_pending = 1;
smp_wmb();
for (i = 0; i < config_cmd->hdr.length; i++)
for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
SET_FLAG(config_cmd->config_table[i].flags,
MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
T_ETH_MAC_COMMAND_INVALIDATE);
wmb();
config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
config_cmd->hdr.offset = offset;
config_cmd->hdr.client_id = 0xff;
/* We'll wait for a completion this time... */
config_cmd->hdr.echo = 1;
bp->set_mac_pending = 1;
mb();
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
@ -6458,6 +6531,44 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
}
/* Accept one or more multicasts */
static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
{
struct net_device *dev = bp->dev;
struct netdev_hw_addr *ha;
u32 mc_filter[MC_HASH_SIZE];
u32 crc, bit, regidx;
int i;
memset(mc_filter, 0, 4 * MC_HASH_SIZE);
netdev_for_each_mc_addr(ha, dev) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
bnx2x_mc_addr(ha));
crc = crc32c_le(0, bnx2x_mc_addr(ha),
ETH_ALEN);
bit = (crc >> 24) & 0xff;
regidx = bit >> 5;
bit &= 0x1f;
mc_filter[regidx] |= (1 << bit);
}
for (i = 0; i < MC_HASH_SIZE; i++)
REG_WR(bp, MC_HASH_OFFSET(bp, i),
mc_filter[i]);
return 0;
}
void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
{
int i;
for (i = 0; i < MC_HASH_SIZE; i++)
REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
}
#ifdef BCM_CNIC
/**
* Set iSCSI MAC(s) at the next entries in the CAM after the ETH
@ -6476,12 +6587,13 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
/* Send a SET_MAC ramrod */
bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
cam_offset, 0);
bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
return 0;
}
@ -7123,20 +7235,15 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
/* Give HW time to discard old tx messages */
msleep(1);
if (CHIP_IS_E1(bp)) {
/* invalidate mc list,
* wait and poll (interrupts are off)
*/
bnx2x_invlidate_e1_mc_list(bp);
bnx2x_set_eth_mac(bp, 0);
bnx2x_set_eth_mac(bp, 0);
} else {
bnx2x_invalidate_uc_list(bp);
if (CHIP_IS_E1(bp))
bnx2x_invalidate_e1_mc_list(bp);
else {
bnx2x_invalidate_e1h_mc_list(bp);
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
bnx2x_set_eth_mac(bp, 0);
for (i = 0; i < MC_HASH_SIZE; i++)
REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
}
#ifdef BCM_CNIC
@ -8405,11 +8512,47 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->common.shmem2_base);
}
#ifdef BCM_CNIC
static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
{
u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
/* Get the number of maximum allowed iSCSI and FCoE connections */
bp->cnic_eth_dev.max_iscsi_conn =
(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
bp->cnic_eth_dev.max_fcoe_conn =
(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
bp->cnic_eth_dev.max_iscsi_conn,
bp->cnic_eth_dev.max_fcoe_conn);
/* If the maximum allowed number of connections is zero,
* disable the feature.
*/
if (!bp->cnic_eth_dev.max_iscsi_conn)
bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
if (!bp->cnic_eth_dev.max_fcoe_conn)
bp->flags |= NO_FCOE_FLAG;
}
#endif
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
u32 val, val2;
int func = BP_ABS_FUNC(bp);
int port = BP_PORT(bp);
#ifdef BCM_CNIC
u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
u8 *fip_mac = bp->fip_mac;
#endif
if (BP_NOMCP(bp)) {
BNX2X_ERROR("warning: random MAC workaround active\n");
@ -8422,7 +8565,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
#ifdef BCM_CNIC
/* iSCSI NPAR MAC */
/* iSCSI and FCoE NPAR MACs: if either the iSCSI or the FCoE
* MAC is missing, the corresponding feature must be disabled.
*/
if (IS_MF_SI(bp)) {
u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
@ -8430,8 +8575,39 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
iscsi_mac_addr_upper);
val = MF_CFG_RD(bp, func_ext_config[func].
iscsi_mac_addr_lower);
bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
}
BNX2X_DEV_INFO("Read iSCSI MAC: "
"0x%x:0x%04x\n", val2, val);
bnx2x_set_mac_buf(iscsi_mac, val, val2);
/* Disable iSCSI OOO if MAC configuration is
* invalid.
*/
if (!is_valid_ether_addr(iscsi_mac)) {
bp->flags |= NO_ISCSI_OOO_FLAG |
NO_ISCSI_FLAG;
memset(iscsi_mac, 0, ETH_ALEN);
}
} else
bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
val2 = MF_CFG_RD(bp, func_ext_config[func].
fcoe_mac_addr_upper);
val = MF_CFG_RD(bp, func_ext_config[func].
fcoe_mac_addr_lower);
BNX2X_DEV_INFO("Read FCoE MAC to "
"0x%x:0x%04x\n", val2, val);
bnx2x_set_mac_buf(fip_mac, val, val2);
/* Disable FCoE if MAC configuration is
* invalid.
*/
if (!is_valid_ether_addr(fip_mac)) {
bp->flags |= NO_FCOE_FLAG;
memset(bp->fip_mac, 0, ETH_ALEN);
}
} else
bp->flags |= NO_FCOE_FLAG;
}
#endif
} else {
@ -8445,7 +8621,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
iscsi_mac_upper);
val = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_lower);
bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
bnx2x_set_mac_buf(iscsi_mac, val, val2);
#endif
}
@ -8453,14 +8629,12 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
#ifdef BCM_CNIC
/* Inform the upper layers about FCoE MAC */
/* Set the FCoE MAC in modes other than MF_SI */
if (!CHIP_IS_E1x(bp)) {
if (IS_MF_SD(bp))
memcpy(bp->fip_mac, bp->dev->dev_addr,
sizeof(bp->fip_mac));
else
memcpy(bp->fip_mac, bp->iscsi_mac,
sizeof(bp->fip_mac));
memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
else if (!IS_MF(bp))
memcpy(fip_mac, iscsi_mac, ETH_ALEN);
}
#endif
}
@ -8623,6 +8797,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
/* Get MAC addresses */
bnx2x_get_mac_hwinfo(bp);
#ifdef BCM_CNIC
bnx2x_get_cnic_info(bp);
#endif
return rc;
}
@ -8837,12 +9015,197 @@ static int bnx2x_close(struct net_device *dev)
return 0;
}
#define E1_MAX_UC_LIST 29
#define E1H_MAX_UC_LIST 30
#define E2_MAX_UC_LIST 14
static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
{
if (CHIP_IS_E1(bp))
return E1_MAX_UC_LIST;
else if (CHIP_IS_E1H(bp))
return E1H_MAX_UC_LIST;
else
return E2_MAX_UC_LIST;
}
static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
{
if (CHIP_IS_E1(bp))
/* CAM Entries for Port0:
* 0 - prim ETH MAC
* 1 - BCAST MAC
* 2 - iSCSI L2 ring ETH MAC
* 3-31 - UC MACs
*
* Port1 entries are allocated the same way starting from
* entry 32.
*/
return 3 + 32 * BP_PORT(bp);
else if (CHIP_IS_E1H(bp)) {
/* CAM Entries:
* 0-7 - prim ETH MAC for each function
* 8-15 - iSCSI L2 ring ETH MAC for each function
* 16 to 255 - UC MAC lists for each function
*
* Remark: There is no FCoE support for E1H, thus FCoE related
* MACs are not considered.
*/
return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
bnx2x_max_uc_list(bp) * BP_FUNC(bp);
} else {
/* CAM Entries (there is a separate CAM per engine):
* 0-4 - prim ETH MAC for each function
* 4-7 - iSCSI L2 ring ETH MAC for each function
* 8-11 - FIP ucast L2 MAC for each function
* 12-15 - ALL_ENODE_MACS mcast MAC for each function
* 16 to 71 - UC MAC lists for each function
*/
u8 func_idx =
(CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
bnx2x_max_uc_list(bp) * func_idx;
}
}
/* set uc list, do not wait as wait implies sleep and
* set_rx_mode can be invoked from non-sleepable context.
*
* Instead we use the same ramrod data buffer each time we need
* to configure a list of addresses, and use the fact that the
* list of MACs is changed in an incremental way and that the
* function is called under the netif_addr_lock. A temporary
* inconsistent CAM configuration (possible in case of a very fast
* sequence of add/del/add on the host side) will shortly be
* restored by the handler of the last ramrod.
*/
static int bnx2x_set_uc_list(struct bnx2x *bp)
{
int i = 0, old;
struct net_device *dev = bp->dev;
u8 offset = bnx2x_uc_list_cam_offset(bp);
struct netdev_hw_addr *ha;
struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
return -EINVAL;
netdev_for_each_uc_addr(ha, dev) {
/* copy mac */
config_cmd->config_table[i].msb_mac_addr =
swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
config_cmd->config_table[i].middle_mac_addr =
swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
config_cmd->config_table[i].lsb_mac_addr =
swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
config_cmd->config_table[i].vlan_id = 0;
config_cmd->config_table[i].pf_id = BP_FUNC(bp);
config_cmd->config_table[i].clients_bit_vector =
cpu_to_le32(1 << BP_L_ID(bp));
SET_FLAG(config_cmd->config_table[i].flags,
MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
T_ETH_MAC_COMMAND_SET);
DP(NETIF_MSG_IFUP,
"setting UCAST[%d] (%04x:%04x:%04x)\n", i,
config_cmd->config_table[i].msb_mac_addr,
config_cmd->config_table[i].middle_mac_addr,
config_cmd->config_table[i].lsb_mac_addr);
i++;
/* Set uc MAC in NIG */
bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
LLH_CAM_ETH_LINE + i);
}
old = config_cmd->hdr.length;
if (old > i) {
for (; i < old; i++) {
if (CAM_IS_INVALID(config_cmd->
config_table[i])) {
/* already invalidated */
break;
}
/* invalidate */
SET_FLAG(config_cmd->config_table[i].flags,
MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
T_ETH_MAC_COMMAND_INVALIDATE);
}
}
wmb();
config_cmd->hdr.length = i;
config_cmd->hdr.offset = offset;
config_cmd->hdr.client_id = 0xff;
/* Mark that this ramrod doesn't use bp->set_mac_pending for
* synchronization.
*/
config_cmd->hdr.echo = 0;
mb();
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
void bnx2x_invalidate_uc_list(struct bnx2x *bp)
{
int i;
struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
int ramrod_flags = WAIT_RAMROD_COMMON;
u8 offset = bnx2x_uc_list_cam_offset(bp);
u8 max_list_size = bnx2x_max_uc_list(bp);
for (i = 0; i < max_list_size; i++) {
SET_FLAG(config_cmd->config_table[i].flags,
MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
T_ETH_MAC_COMMAND_INVALIDATE);
bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
}
wmb();
config_cmd->hdr.length = max_list_size;
config_cmd->hdr.offset = offset;
config_cmd->hdr.client_id = 0xff;
/* We'll wait for a completion this time... */
config_cmd->hdr.echo = 1;
bp->set_mac_pending = 1;
mb();
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
/* Wait for a completion */
bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
ramrod_flags);
}
static inline int bnx2x_set_mc_list(struct bnx2x *bp)
{
/* some multicasts */
if (CHIP_IS_E1(bp)) {
return bnx2x_set_e1_mc_list(bp);
} else { /* E1H and newer */
return bnx2x_set_e1h_mc_list(bp);
}
}
/* called with netif_tx_lock from dev_mcast.c */
void bnx2x_set_rx_mode(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
u32 rx_mode = BNX2X_RX_MODE_NORMAL;
int port = BP_PORT(bp);
if (bp->state != BNX2X_STATE_OPEN) {
DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
@ -8853,47 +9216,16 @@ void bnx2x_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
rx_mode = BNX2X_RX_MODE_PROMISC;
else if ((dev->flags & IFF_ALLMULTI) ||
((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
CHIP_IS_E1(bp)))
else if (dev->flags & IFF_ALLMULTI)
rx_mode = BNX2X_RX_MODE_ALLMULTI;
else { /* some multicasts */
if (CHIP_IS_E1(bp)) {
/*
* set mc list, do not wait as wait implies sleep
* and set_rx_mode can be invoked from non-sleepable
* context
*/
u8 offset = (CHIP_REV_IS_SLOW(bp) ?
BNX2X_MAX_EMUL_MULTI*(1 + port) :
BNX2X_MAX_MULTICAST*(1 + port));
else {
/* some multicasts */
if (bnx2x_set_mc_list(bp))
rx_mode = BNX2X_RX_MODE_ALLMULTI;
bnx2x_set_e1_mc_list(bp, offset);
} else { /* E1H */
/* Accept one or more multicasts */
struct netdev_hw_addr *ha;
u32 mc_filter[MC_HASH_SIZE];
u32 crc, bit, regidx;
int i;
memset(mc_filter, 0, 4 * MC_HASH_SIZE);
netdev_for_each_mc_addr(ha, dev) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
bnx2x_mc_addr(ha));
crc = crc32c_le(0, bnx2x_mc_addr(ha),
ETH_ALEN);
bit = (crc >> 24) & 0xff;
regidx = bit >> 5;
bit &= 0x1f;
mc_filter[regidx] |= (1 << bit);
}
for (i = 0; i < MC_HASH_SIZE; i++)
REG_WR(bp, MC_HASH_OFFSET(bp, i),
mc_filter[i]);
}
/* some unicasts */
if (bnx2x_set_uc_list(bp))
rx_mode = BNX2X_RX_MODE_PROMISC;
}
bp->rx_mode = rx_mode;
@ -8974,7 +9306,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_stop = bnx2x_close,
.ndo_start_xmit = bnx2x_start_xmit,
.ndo_select_queue = bnx2x_select_queue,
.ndo_set_multicast_list = bnx2x_set_rx_mode,
.ndo_set_rx_mode = bnx2x_set_rx_mode,
.ndo_set_mac_address = bnx2x_change_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = bnx2x_ioctl,
@ -9120,7 +9452,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
dev->vlan_features |= NETIF_F_TSO6;
#ifdef BCM_DCB
#ifdef BCM_DCBNL
dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif
@ -9527,6 +9859,11 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
}
#endif
#ifdef BCM_DCBNL
/* Delete app tlvs from dcbnl */
bnx2x_dcbnl_update_applist(bp, true);
#endif
unregister_netdev(dev);
/* Delete all NAPI objects */
@ -9800,15 +10137,21 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
HW_CID(bp, BNX2X_ISCSI_ETH_CID));
}
/* There may be no more than 8 L2 and COMMON SPEs and no more
* than 8 L5 SPEs in the air.
/* There may be no more than 8 L2 and no more than 8 L5 SPEs.
* We also check that the number of outstanding
* COMMON ramrods is not more than the EQ and SPQ can
* accommodate.
*/
if ((type == NONE_CONNECTION_TYPE) ||
(type == ETH_CONNECTION_TYPE)) {
if (!atomic_read(&bp->spq_left))
if (type == ETH_CONNECTION_TYPE) {
if (!atomic_read(&bp->cq_spq_left))
break;
else
atomic_dec(&bp->spq_left);
atomic_dec(&bp->cq_spq_left);
} else if (type == NONE_CONNECTION_TYPE) {
if (!atomic_read(&bp->eq_spq_left))
break;
else
atomic_dec(&bp->eq_spq_left);
} else if ((type == ISCSI_CONNECTION_TYPE) ||
(type == FCOE_CONNECTION_TYPE)) {
if (bp->cnic_spq_pending >=
@ -9886,7 +10229,8 @@ static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
int rc = 0;
mutex_lock(&bp->cnic_mutex);
c_ops = bp->cnic_ops;
c_ops = rcu_dereference_protected(bp->cnic_ops,
lockdep_is_held(&bp->cnic_mutex));
if (c_ops)
rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
mutex_unlock(&bp->cnic_mutex);
@ -10000,7 +10344,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
int count = ctl->data.credit.credit_count;
smp_mb__before_atomic_inc();
atomic_add(count, &bp->spq_left);
atomic_add(count, &bp->cq_spq_left);
smp_mb__after_atomic_inc();
break;
}
@ -10096,6 +10440,13 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
/* If both iSCSI and FCoE are disabled - return NULL in
* order to indicate to CNIC that it should not try to work
* with this device.
*/
if (NO_ISCSI(bp) && NO_FCOE(bp))
return NULL;
cp->drv_owner = THIS_MODULE;
cp->chip_id = CHIP_ID(bp);
cp->pdev = bp->pdev;
@ -10116,6 +10467,15 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
if (NO_ISCSI_OOO(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
if (NO_ISCSI(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
if (NO_FCOE(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
"starting cid %d\n",
cp->ctx_blk_size,


@ -6083,6 +6083,7 @@ The other bits are reserved and should be zero*/
#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
#define MDIO_PMA_REG_8727_PCS_GP 0xc842
#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
#define MDIO_AN_REG_8727_MISC_CTRL 0x8309


@ -6,6 +6,9 @@ obj-$(CONFIG_BONDING) += bonding.o
bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
proc-$(CONFIG_PROC_FS) += bond_procfs.o
bonding-objs += $(proc-y)
ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o
bonding-objs += $(ipv6-y)


@ -246,7 +246,7 @@ static inline void __enable_port(struct port *port)
*/
static inline int __port_is_enabled(struct port *port)
{
return port->slave->state == BOND_STATE_ACTIVE;
return bond_is_active_slave(port->slave);
}
/**


@ -604,7 +604,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
_lock_rx_hashtbl(bond);
hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_src));
hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (client_info->assigned) {


@ -59,15 +59,12 @@
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
@ -174,9 +171,6 @@ MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif
static const char * const version =
DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
int bond_net_id __read_mostly;
static __be32 arp_target[BOND_MAX_ARP_TARGETS];
@ -246,7 +240,7 @@ static void bond_uninit(struct net_device *bond_dev);
/*---------------------------- General routines -----------------------------*/
static const char *bond_mode_name(int mode)
const char *bond_mode_name(int mode)
{
static const char *names[] = {
[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
@ -424,15 +418,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
{
skb->dev = slave_dev;
skb->priority = 1;
#ifdef CONFIG_NET_POLL_CONTROLLER
if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
struct netpoll *np = bond->dev->npinfo->netpoll;
slave_dev->npinfo = bond->dev->npinfo;
slave_dev->priv_flags |= IFF_IN_NETPOLL;
netpoll_send_skb_on_dev(np, skb, slave_dev);
slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
} else
#endif
if (unlikely(netpoll_tx_running(slave_dev)))
bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
else
dev_queue_xmit(skb);
return 0;
@ -1288,63 +1276,103 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* You must hold read lock on bond->lock before calling this.
*/
static bool slaves_support_netpoll(struct net_device *bond_dev)
static inline int slave_enable_netpoll(struct slave *slave)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
int i = 0;
bool ret = true;
struct netpoll *np;
int err = 0;
bond_for_each_slave(bond, slave, i) {
if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
!slave->dev->netdev_ops->ndo_poll_controller)
ret = false;
np = kzalloc(sizeof(*np), GFP_KERNEL);
err = -ENOMEM;
if (!np)
goto out;
np->dev = slave->dev;
err = __netpoll_setup(np);
if (err) {
kfree(np);
goto out;
}
return i != 0 && ret;
slave->np = np;
out:
return err;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
struct netpoll *np = slave->np;
if (!np)
return;
slave->np = NULL;
synchronize_rcu_bh();
__netpoll_cleanup(np);
kfree(np);
}
static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
{
if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
return false;
if (!slave_dev->netdev_ops->ndo_poll_controller)
return false;
return true;
}
static void bond_poll_controller(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
}
static void __bond_netpoll_cleanup(struct bonding *bond)
{
struct slave *slave;
int i;
bond_for_each_slave(bond, slave, i) {
if (slave->dev && IS_UP(slave->dev))
netpoll_poll_dev(slave->dev);
}
bond_for_each_slave(bond, slave, i)
if (IS_UP(slave->dev))
slave_disable_netpoll(slave);
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
const struct net_device_ops *ops;
int i;
read_lock(&bond->lock);
bond_dev->npinfo = NULL;
bond_for_each_slave(bond, slave, i) {
if (slave->dev) {
ops = slave->dev->netdev_ops;
if (ops->ndo_netpoll_cleanup)
ops->ndo_netpoll_cleanup(slave->dev);
else
slave->dev->npinfo = NULL;
}
}
__bond_netpoll_cleanup(bond);
read_unlock(&bond->lock);
}
#else
static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
struct bonding *bond = netdev_priv(dev);
struct slave *slave;
int i, err = 0;
read_lock(&bond->lock);
bond_for_each_slave(bond, slave, i) {
err = slave_enable_netpoll(slave);
if (err) {
__bond_netpoll_cleanup(bond);
break;
}
}
read_unlock(&bond->lock);
return err;
}
static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
{
return bond->dev->npinfo;
}
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
return 0;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif
/*---------------------------------- IOCTL ----------------------------------*/
@ -1372,8 +1400,8 @@ static int bond_compute_features(struct bonding *bond)
{
struct slave *slave;
struct net_device *bond_dev = bond->dev;
unsigned long features = bond_dev->features;
unsigned long vlan_features = 0;
u32 features = bond_dev->features;
u32 vlan_features = 0;
unsigned short max_hard_header_len = max((u16)ETH_HLEN,
bond_dev->hard_header_len);
int i;
@ -1400,8 +1428,8 @@ static int bond_compute_features(struct bonding *bond)
done:
features |= (bond_dev->features & BOND_VLAN_FEATURES);
bond_dev->features = netdev_fix_features(features, NULL);
bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL);
bond_dev->features = netdev_fix_features(bond_dev, features);
bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
bond_dev->hard_header_len = max_hard_header_len;
return 0;
@ -1423,6 +1451,77 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
bond->setup_by_slave = 1;
}
/* On bonding slaves other than the currently active slave, suppress
* duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
* ARP on active-backup slaves with arp_validate enabled.
*/
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
struct slave *slave,
struct bonding *bond)
{
if (bond_is_slave_inactive(slave)) {
if (slave_do_arp_validate(bond, slave) &&
skb->protocol == __cpu_to_be16(ETH_P_ARP))
return false;
if (bond->params.mode == BOND_MODE_ALB &&
skb->pkt_type != PACKET_BROADCAST &&
skb->pkt_type != PACKET_MULTICAST)
return false;
if (bond->params.mode == BOND_MODE_8023AD &&
skb->protocol == __cpu_to_be16(ETH_P_SLOW))
return false;
return true;
}
return false;
}
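/* An illustrative walk-through (editorial sketch, not part of the
 * patch): a LACPDU (skb->protocol == ETH_P_SLOW) arriving on an
 * inactive 802.3ad slave hits the third test and returns false, so it
 * keeps flowing through bond_handle_frame() below as usual; any other
 * frame on that inactive slave returns true and is answered with
 * RX_HANDLER_EXACT, i.e. delivered only to exact-match protocol
 * handlers, which is how the duplicates are suppressed.
 */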
static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct slave *slave;
struct net_device *bond_dev;
struct bonding *bond;
slave = bond_slave_get_rcu(skb->dev);
bond_dev = ACCESS_ONCE(slave->dev->master);
if (unlikely(!bond_dev))
return RX_HANDLER_PASS;
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return RX_HANDLER_CONSUMED;
*pskb = skb;
bond = netdev_priv(bond_dev);
if (bond->params.arp_interval)
slave->dev->last_rx = jiffies;
if (bond_should_deliver_exact_match(skb, slave, bond)) {
return RX_HANDLER_EXACT;
}
skb->dev = bond_dev;
if (bond->params.mode == BOND_MODE_ALB &&
bond_dev->priv_flags & IFF_BRIDGE_PORT &&
skb->pkt_type == PACKET_HOST) {
if (unlikely(skb_cow_head(skb,
skb->data - skb_mac_header(skb)))) {
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
memcpy(eth_hdr(skb)->h_dest, bond_dev->dev_addr, ETH_ALEN);
}
return RX_HANDLER_ANOTHER;
}
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
@ -1594,16 +1693,23 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
res = netdev_set_master(slave_dev, bond_dev);
res = netdev_set_bond_master(slave_dev, bond_dev);
if (res) {
pr_debug("Error %d calling netdev_set_master\n", res);
pr_debug("Error %d calling netdev_set_bond_master\n", res);
goto err_restore_mac;
}
res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
new_slave);
if (res) {
pr_debug("Error %d calling netdev_rx_handler_register\n", res);
goto err_unset_master;
}
/* open the slave since the application closed it */
res = dev_open(slave_dev);
if (res) {
pr_debug("Opening slave %s failed\n", slave_dev->name);
goto err_unset_master;
goto err_unreg_rxhandler;
}
new_slave->dev = slave_dev;
@ -1757,7 +1863,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
break;
case BOND_MODE_TLB:
case BOND_MODE_ALB:
new_slave->state = BOND_STATE_ACTIVE;
bond_set_active_slave(new_slave);
bond_set_slave_inactive_flags(new_slave);
bond_select_active_slave(bond);
break;
@ -1765,7 +1871,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
pr_debug("This slave is always active in trunk mode\n");
/* always active in trunk mode */
new_slave->state = BOND_STATE_ACTIVE;
bond_set_active_slave(new_slave);
/* In trunking mode there is little meaning to curr_active_slave
* anyway (it holds no special properties of the bond device),
@ -1782,17 +1888,19 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_carrier(bond);
#ifdef CONFIG_NET_POLL_CONTROLLER
if (slaves_support_netpoll(bond_dev)) {
bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
if (bond_dev->npinfo)
slave_dev->npinfo = bond_dev->npinfo;
} else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
pr_info("New slave device %s does not support netpoll\n",
slave_dev->name);
pr_info("Disabling netpoll support for %s\n", bond_dev->name);
slave_dev->npinfo = bond_netpoll_info(bond);
if (slave_dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
read_unlock(&bond->lock);
pr_info("Error, %s: master_dev is using netpoll, "
"but new slave device does not support netpoll.\n",
bond_dev->name);
res = -EBUSY;
goto err_close;
}
}
#endif
read_unlock(&bond->lock);
res = bond_create_slave_symlinks(bond_dev, slave_dev);
@ -1801,7 +1909,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
bond_dev->name, slave_dev->name,
new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup",
bond_is_active_slave(new_slave) ? "n active" : " backup",
new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
/* enslave is successful */
@ -1811,8 +1919,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
err_close:
dev_close(slave_dev);
err_unreg_rxhandler:
netdev_rx_handler_unregister(slave_dev);
synchronize_net();
err_unset_master:
netdev_set_master(slave_dev, NULL);
netdev_set_bond_master(slave_dev, NULL);
err_restore_mac:
if (!bond->params.fail_over_mac) {
@ -1895,7 +2007,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
pr_info("%s: releasing %s interface %s\n",
bond_dev->name,
(slave->state == BOND_STATE_ACTIVE) ? "active" : "backup",
bond_is_active_slave(slave) ? "active" : "backup",
slave_dev->name);
oldcurrent = bond->curr_active_slave;
@ -1992,19 +2104,11 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
netif_addr_unlock_bh(bond_dev);
}
netdev_set_master(slave_dev, NULL);
netdev_rx_handler_unregister(slave_dev);
synchronize_net();
netdev_set_bond_master(slave_dev, NULL);
#ifdef CONFIG_NET_POLL_CONTROLLER
read_lock_bh(&bond->lock);
if (slaves_support_netpoll(bond_dev))
bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
read_unlock_bh(&bond->lock);
if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
else
slave_dev->npinfo = NULL;
#endif
slave_disable_netpoll(slave);
/* close slave before restoring its mac address */
dev_close(slave_dev);
@ -2018,9 +2122,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
dev_set_mtu(slave_dev, slave->original_mtu);
slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
IFF_SLAVE_INACTIVE | IFF_BONDING |
IFF_SLAVE_NEEDARP);
slave_dev->priv_flags &= ~IFF_BONDING;
kfree(slave);
@ -2039,6 +2141,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
ret = bond_release(bond_dev, slave_dev);
if ((ret == 0) && (bond->slave_cnt == 0)) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
pr_info("%s: destroying bond %s.\n",
bond_dev->name, bond_dev->name);
unregister_netdevice(bond_dev);
@ -2114,7 +2217,11 @@ static int bond_release_all(struct net_device *bond_dev)
netif_addr_unlock_bh(bond_dev);
}
netdev_set_master(slave_dev, NULL);
netdev_rx_handler_unregister(slave_dev);
synchronize_net();
netdev_set_bond_master(slave_dev, NULL);
slave_disable_netpoll(slave);
/* close slave before restoring its mac address */
dev_close(slave_dev);
@ -2126,9 +2233,6 @@ static int bond_release_all(struct net_device *bond_dev)
dev_set_mac_address(slave_dev, &addr);
}
slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
IFF_SLAVE_INACTIVE);
kfree(slave);
/* re-acquire the lock before getting the next slave */
@ -2242,7 +2346,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
res = 0;
strcpy(info->slave_name, slave->dev->name);
info->link = slave->link;
info->state = slave->state;
info->state = bond_slave_state(slave);
info->link_failure_count = slave->link_failure_count;
break;
}
@ -2281,7 +2385,7 @@ static int bond_miimon_inspect(struct bonding *bond)
bond->dev->name,
(bond->params.mode ==
BOND_MODE_ACTIVEBACKUP) ?
((slave->state == BOND_STATE_ACTIVE) ?
(bond_is_active_slave(slave) ?
"active " : "backup ") : "",
slave->dev->name,
bond->params.downdelay * bond->params.miimon);
@ -2372,13 +2476,13 @@ static void bond_miimon_commit(struct bonding *bond)
if (bond->params.mode == BOND_MODE_8023AD) {
/* prevent it from being the active one */
slave->state = BOND_STATE_BACKUP;
bond_set_backup_slave(slave);
} else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* make it immediately active */
slave->state = BOND_STATE_ACTIVE;
bond_set_active_slave(slave);
} else if (slave != bond->primary_slave) {
/* prevent it from being the active one */
slave->state = BOND_STATE_BACKUP;
bond_set_backup_slave(slave);
}
bond_update_speed_duplex(slave);
@ -2571,11 +2675,10 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
{
int i, vlan_id, rv;
int i, vlan_id;
__be32 *targets = bond->params.arp_targets;
struct vlan_entry *vlan;
struct net_device *vlan_dev;
struct flowi fl;
struct rtable *rt;
for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
@ -2594,15 +2697,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
* determine which VLAN interface would be used, so we
* can tag the ARP with the proper VLAN tag.
*/
memset(&fl, 0, sizeof(fl));
fl.fl4_dst = targets[i];
fl.fl4_tos = RTO_ONLINK;
rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl);
if (rv) {
rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
RTO_ONLINK, 0);
if (IS_ERR(rt)) {
if (net_ratelimit()) {
pr_warning("%s: no route to arp_ip_target %pI4\n",
bond->dev->name, &fl.fl4_dst);
bond->dev->name, &targets[i]);
}
continue;
}
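/* Sketch of the new routing call convention used above (illustrative):
 *
 *	rt = ip_route_output(net, daddr, saddr, tos, oif);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *
 * the route is returned directly and errors travel as ERR_PTR()
 * values, replacing the old flowi-plus-ip_route_output_key() pattern.
 */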
@ -2638,7 +2738,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
if (net_ratelimit()) {
pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
bond->dev->name, &fl.fl4_dst,
bond->dev->name, &targets[i],
rt->dst.dev ? rt->dst.dev->name : "NULL");
}
ip_rt_put(rt);
@ -2756,7 +2856,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
memcpy(&tip, arp_ptr, 4);
pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n",
bond->dev->name, slave->dev->name, slave->state,
bond->dev->name, slave->dev->name, bond_slave_state(slave),
bond->params.arp_validate, slave_do_arp_validate(bond, slave),
&sip, &tip);
@ -2768,7 +2868,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
* the active, through one switch, the router, then the other
* switch before reaching the backup.
*/
if (slave->state == BOND_STATE_ACTIVE)
if (bond_is_active_slave(slave))
bond_validate_arp(bond, slave, sip, tip);
else
bond_validate_arp(bond, slave, tip, sip);
@ -2830,7 +2930,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
slave->dev->last_rx + delta_in_ticks)) {
slave->link = BOND_LINK_UP;
slave->state = BOND_STATE_ACTIVE;
bond_set_active_slave(slave);
/* primary_slave has no meaning in round-robin
* mode. the window of a slave being up and
@ -2863,7 +2963,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
slave->dev->last_rx + 2 * delta_in_ticks)) {
slave->link = BOND_LINK_DOWN;
slave->state = BOND_STATE_BACKUP;
bond_set_backup_slave(slave);
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
@ -2957,7 +3057,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
* gives each slave a chance to tx/rx traffic
* before being taken out
*/
if (slave->state == BOND_STATE_BACKUP &&
if (!bond_is_active_slave(slave) &&
!bond->current_arp_slave &&
!time_in_range(jiffies,
slave_last_rx(bond, slave) - delta_in_ticks,
@ -2974,7 +3074,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
* the bond has an IP address)
*/
trans_start = dev_trans_start(slave->dev);
if ((slave->state == BOND_STATE_ACTIVE) &&
if (bond_is_active_slave(slave) &&
(!time_in_range(jiffies,
trans_start - delta_in_ticks,
trans_start + 2 * delta_in_ticks) ||
@ -3182,299 +3282,6 @@ out:
read_unlock(&bond->lock);
}
/*------------------------------ proc/seq_file-------------------------------*/
#ifdef CONFIG_PROC_FS
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
__acquires(&bond->lock)
{
struct bonding *bond = seq->private;
loff_t off = 0;
struct slave *slave;
int i;
/* make sure the bond won't be taken away */
rcu_read_lock();
read_lock(&bond->lock);
if (*pos == 0)
return SEQ_START_TOKEN;
bond_for_each_slave(bond, slave, i) {
if (++off == *pos)
return slave;
}
return NULL;
}
static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct bonding *bond = seq->private;
struct slave *slave = v;
++*pos;
if (v == SEQ_START_TOKEN)
return bond->first_slave;
slave = slave->next;
return (slave == bond->first_slave) ? NULL : slave;
}
static void bond_info_seq_stop(struct seq_file *seq, void *v)
__releases(&bond->lock)
__releases(RCU)
{
struct bonding *bond = seq->private;
read_unlock(&bond->lock);
rcu_read_unlock();
}
static void bond_info_show_master(struct seq_file *seq)
{
struct bonding *bond = seq->private;
struct slave *curr;
int i;
read_lock(&bond->curr_slave_lock);
curr = bond->curr_active_slave;
read_unlock(&bond->curr_slave_lock);
seq_printf(seq, "Bonding Mode: %s",
bond_mode_name(bond->params.mode));
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
bond->params.fail_over_mac)
seq_printf(seq, " (fail_over_mac %s)",
fail_over_mac_tbl[bond->params.fail_over_mac].modename);
seq_printf(seq, "\n");
if (bond->params.mode == BOND_MODE_XOR ||
bond->params.mode == BOND_MODE_8023AD) {
seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
xmit_hashtype_tbl[bond->params.xmit_policy].modename,
bond->params.xmit_policy);
}
if (USES_PRIMARY(bond->params.mode)) {
seq_printf(seq, "Primary Slave: %s",
(bond->primary_slave) ?
bond->primary_slave->dev->name : "None");
if (bond->primary_slave)
seq_printf(seq, " (primary_reselect %s)",
pri_reselect_tbl[bond->params.primary_reselect].modename);
seq_printf(seq, "\nCurrently Active Slave: %s\n",
(curr) ? curr->dev->name : "None");
}
seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ?
"up" : "down");
seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon);
seq_printf(seq, "Up Delay (ms): %d\n",
bond->params.updelay * bond->params.miimon);
seq_printf(seq, "Down Delay (ms): %d\n",
bond->params.downdelay * bond->params.miimon);
/* ARP information */
if (bond->params.arp_interval > 0) {
int printed = 0;
seq_printf(seq, "ARP Polling Interval (ms): %d\n",
bond->params.arp_interval);
seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
if (!bond->params.arp_targets[i])
break;
if (printed)
seq_printf(seq, ",");
seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
printed = 1;
}
seq_printf(seq, "\n");
}
if (bond->params.mode == BOND_MODE_8023AD) {
struct ad_info ad_info;
seq_puts(seq, "\n802.3ad info\n");
seq_printf(seq, "LACP rate: %s\n",
(bond->params.lacp_fast) ? "fast" : "slow");
seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
ad_select_tbl[bond->params.ad_select].modename);
if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
seq_printf(seq, "bond %s has no active aggregator\n",
bond->dev->name);
} else {
seq_printf(seq, "Active Aggregator Info:\n");
seq_printf(seq, "\tAggregator ID: %d\n",
ad_info.aggregator_id);
seq_printf(seq, "\tNumber of ports: %d\n",
ad_info.ports);
seq_printf(seq, "\tActor Key: %d\n",
ad_info.actor_key);
seq_printf(seq, "\tPartner Key: %d\n",
ad_info.partner_key);
seq_printf(seq, "\tPartner Mac Address: %pM\n",
ad_info.partner_system);
}
}
}
static void bond_info_show_slave(struct seq_file *seq,
const struct slave *slave)
{
struct bonding *bond = seq->private;
seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
seq_printf(seq, "MII Status: %s\n",
(slave->link == BOND_LINK_UP) ? "up" : "down");
seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
seq_printf(seq, "Link Failure Count: %u\n",
slave->link_failure_count);
seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
if (bond->params.mode == BOND_MODE_8023AD) {
const struct aggregator *agg
= SLAVE_AD_INFO(slave).port.aggregator;
if (agg)
seq_printf(seq, "Aggregator ID: %d\n",
agg->aggregator_identifier);
else
seq_puts(seq, "Aggregator ID: N/A\n");
}
seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
}
static int bond_info_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "%s\n", version);
bond_info_show_master(seq);
} else
bond_info_show_slave(seq, v);
return 0;
}
static const struct seq_operations bond_info_seq_ops = {
.start = bond_info_seq_start,
.next = bond_info_seq_next,
.stop = bond_info_seq_stop,
.show = bond_info_seq_show,
};
static int bond_info_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
struct proc_dir_entry *proc;
int res;
res = seq_open(file, &bond_info_seq_ops);
if (!res) {
/* recover the pointer buried in proc_dir_entry data */
seq = file->private_data;
proc = PDE(inode);
seq->private = proc->data;
}
return res;
}
static const struct file_operations bond_info_fops = {
.owner = THIS_MODULE,
.open = bond_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void bond_create_proc_entry(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
if (bn->proc_dir) {
bond->proc_entry = proc_create_data(bond_dev->name,
S_IRUGO, bn->proc_dir,
&bond_info_fops, bond);
if (bond->proc_entry == NULL)
pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
DRV_NAME, bond_dev->name);
else
memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
}
}
static void bond_remove_proc_entry(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
if (bn->proc_dir && bond->proc_entry) {
remove_proc_entry(bond->proc_file_name, bn->proc_dir);
memset(bond->proc_file_name, 0, IFNAMSIZ);
bond->proc_entry = NULL;
}
}
/* Create the bonding directory under /proc/net, if it doesn't exist yet.
* Caller must hold rtnl_lock.
*/
static void __net_init bond_create_proc_dir(struct bond_net *bn)
{
if (!bn->proc_dir) {
bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
if (!bn->proc_dir)
pr_warning("Warning: cannot create /proc/net/%s\n",
DRV_NAME);
}
}
/* Destroy the bonding directory under /proc/net, if empty.
* Caller must hold rtnl_lock.
*/
static void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
{
if (bn->proc_dir) {
remove_proc_entry(DRV_NAME, bn->net->proc_net);
bn->proc_dir = NULL;
}
}
#else /* !CONFIG_PROC_FS */
static void bond_create_proc_entry(struct bonding *bond)
{
}
static void bond_remove_proc_entry(struct bonding *bond)
{
}
static inline void bond_create_proc_dir(struct bond_net *bn)
{
}
static inline void bond_destroy_proc_dir(struct bond_net *bn)
{
}
#endif /* CONFIG_PROC_FS */
/*-------------------------- netdev event handling --------------------------*/
/*
@ -4331,7 +4138,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
bond_for_each_slave_from(bond, slave, i, start_at) {
if (IS_UP(slave->dev) &&
(slave->link == BOND_LINK_UP) &&
(slave->state == BOND_STATE_ACTIVE)) {
bond_is_active_slave(slave)) {
res = bond_dev_queue_xmit(bond, skb, slave->dev);
break;
}
@ -4408,7 +4215,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
bond_for_each_slave_from(bond, slave, i, start_at) {
if (IS_UP(slave->dev) &&
(slave->link == BOND_LINK_UP) &&
(slave->state == BOND_STATE_ACTIVE)) {
bond_is_active_slave(slave)) {
res = bond_dev_queue_xmit(bond, skb, slave->dev);
break;
}
@ -4449,7 +4256,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
bond_for_each_slave_from(bond, slave, i, start_at) {
if (IS_UP(slave->dev) &&
(slave->link == BOND_LINK_UP) &&
(slave->state == BOND_STATE_ACTIVE)) {
bond_is_active_slave(slave)) {
if (tx_dev) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
@ -4537,11 +4344,18 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
{
/*
* This helper function exists to help dev_pick_tx get the correct
* destination queue. Using a helper function skips the a call to
* destination queue. Using a helper function skips a call to
* skb_tx_hash and will put the skbs in the queue we expect on their
* way down to the bonding driver.
*/
return skb->queue_mapping;
u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
if (unlikely(txq >= dev->real_num_tx_queues)) {
do
txq -= dev->real_num_tx_queues;
while (txq >= dev->real_num_tx_queues);
}
return txq;
}
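/* A worked example of the queue reduction above (illustrative only):
 * with dev->real_num_tx_queues == 4 and a recorded rx queue of 9, the
 * do/while computes 9 - 4 = 5, then 5 - 4 = 1, so the skb goes out on
 * tx queue 1 -- txq modulo real_num_tx_queues without a division.
 */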
static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
@ -4603,11 +4417,9 @@ void bond_set_mode_ops(struct bonding *bond, int mode)
case BOND_MODE_BROADCAST:
break;
case BOND_MODE_8023AD:
bond_set_master_3ad_flags(bond);
bond_set_xmit_hash_policy(bond);
break;
case BOND_MODE_ALB:
bond_set_master_alb_flags(bond);
/* FALLTHRU */
case BOND_MODE_TLB:
break;
@ -4654,9 +4466,12 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_netpoll_setup = bond_netpoll_setup,
.ndo_netpoll_cleanup = bond_netpoll_cleanup,
.ndo_poll_controller = bond_poll_controller,
#endif
.ndo_add_slave = bond_enslave,
.ndo_del_slave = bond_release,
};
static void bond_destructor(struct net_device *bond_dev)
@ -4695,9 +4510,6 @@ static void bond_setup(struct net_device *bond_dev)
bond_dev->priv_flags |= IFF_BONDING;
bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
if (bond->params.arp_interval)
bond_dev->priv_flags |= IFF_MASTER_ARPMON;
/* At first, we block adding VLANs. That's the only way to
* prevent problems that occur when adding VLANs over an
* empty bond. The block will be removed once non-challenged
@ -5166,8 +4978,6 @@ static int bond_init(struct net_device *bond_dev)
bond_set_lockdep_class(bond_dev);
netif_carrier_off(bond_dev);
bond_create_proc_entry(bond);
list_add_tail(&bond->bond_list, &bn->dev_list);
@ -5237,6 +5047,8 @@ int bond_create(struct net *net, const char *name)
res = register_netdevice(bond_dev);
netif_carrier_off(bond_dev);
out:
rtnl_unlock();
if (res < 0)
@ -5275,7 +5087,7 @@ static int __init bonding_init(void)
int i;
int res;
pr_info("%s", version);
pr_info("%s", bond_version);
res = bond_check_params(&bonding_defaults);
if (res)

View file

@ -0,0 +1,275 @@
#include <linux/proc_fs.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "bonding.h"
extern const char *bond_mode_name(int mode);
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
__acquires(&bond->lock)
{
struct bonding *bond = seq->private;
loff_t off = 0;
struct slave *slave;
int i;
/* make sure the bond won't be taken away */
rcu_read_lock();
read_lock(&bond->lock);
if (*pos == 0)
return SEQ_START_TOKEN;
bond_for_each_slave(bond, slave, i) {
if (++off == *pos)
return slave;
}
return NULL;
}
static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct bonding *bond = seq->private;
struct slave *slave = v;
++*pos;
if (v == SEQ_START_TOKEN)
return bond->first_slave;
slave = slave->next;
return (slave == bond->first_slave) ? NULL : slave;
}
static void bond_info_seq_stop(struct seq_file *seq, void *v)
__releases(&bond->lock)
__releases(RCU)
{
struct bonding *bond = seq->private;
read_unlock(&bond->lock);
rcu_read_unlock();
}
static void bond_info_show_master(struct seq_file *seq)
{
struct bonding *bond = seq->private;
struct slave *curr;
int i;
read_lock(&bond->curr_slave_lock);
curr = bond->curr_active_slave;
read_unlock(&bond->curr_slave_lock);
seq_printf(seq, "Bonding Mode: %s",
bond_mode_name(bond->params.mode));
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
bond->params.fail_over_mac)
seq_printf(seq, " (fail_over_mac %s)",
fail_over_mac_tbl[bond->params.fail_over_mac].modename);
seq_printf(seq, "\n");
if (bond->params.mode == BOND_MODE_XOR ||
bond->params.mode == BOND_MODE_8023AD) {
seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
xmit_hashtype_tbl[bond->params.xmit_policy].modename,
bond->params.xmit_policy);
}
if (USES_PRIMARY(bond->params.mode)) {
seq_printf(seq, "Primary Slave: %s",
(bond->primary_slave) ?
bond->primary_slave->dev->name : "None");
if (bond->primary_slave)
seq_printf(seq, " (primary_reselect %s)",
pri_reselect_tbl[bond->params.primary_reselect].modename);
seq_printf(seq, "\nCurrently Active Slave: %s\n",
(curr) ? curr->dev->name : "None");
}
seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ?
"up" : "down");
seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon);
seq_printf(seq, "Up Delay (ms): %d\n",
bond->params.updelay * bond->params.miimon);
seq_printf(seq, "Down Delay (ms): %d\n",
bond->params.downdelay * bond->params.miimon);
/* ARP information */
if (bond->params.arp_interval > 0) {
int printed = 0;
seq_printf(seq, "ARP Polling Interval (ms): %d\n",
bond->params.arp_interval);
seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
if (!bond->params.arp_targets[i])
break;
if (printed)
seq_printf(seq, ",");
seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
printed = 1;
}
seq_printf(seq, "\n");
}
if (bond->params.mode == BOND_MODE_8023AD) {
struct ad_info ad_info;
seq_puts(seq, "\n802.3ad info\n");
seq_printf(seq, "LACP rate: %s\n",
(bond->params.lacp_fast) ? "fast" : "slow");
seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
ad_select_tbl[bond->params.ad_select].modename);
if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
seq_printf(seq, "bond %s has no active aggregator\n",
bond->dev->name);
} else {
seq_printf(seq, "Active Aggregator Info:\n");
seq_printf(seq, "\tAggregator ID: %d\n",
ad_info.aggregator_id);
seq_printf(seq, "\tNumber of ports: %d\n",
ad_info.ports);
seq_printf(seq, "\tActor Key: %d\n",
ad_info.actor_key);
seq_printf(seq, "\tPartner Key: %d\n",
ad_info.partner_key);
seq_printf(seq, "\tPartner Mac Address: %pM\n",
ad_info.partner_system);
}
}
}
static void bond_info_show_slave(struct seq_file *seq,
const struct slave *slave)
{
struct bonding *bond = seq->private;
seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
seq_printf(seq, "MII Status: %s\n",
(slave->link == BOND_LINK_UP) ? "up" : "down");
seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
seq_printf(seq, "Link Failure Count: %u\n",
slave->link_failure_count);
seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
if (bond->params.mode == BOND_MODE_8023AD) {
const struct aggregator *agg
= SLAVE_AD_INFO(slave).port.aggregator;
if (agg)
seq_printf(seq, "Aggregator ID: %d\n",
agg->aggregator_identifier);
else
seq_puts(seq, "Aggregator ID: N/A\n");
}
seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
}
static int bond_info_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "%s\n", bond_version);
bond_info_show_master(seq);
} else
bond_info_show_slave(seq, v);
return 0;
}
static const struct seq_operations bond_info_seq_ops = {
.start = bond_info_seq_start,
.next = bond_info_seq_next,
.stop = bond_info_seq_stop,
.show = bond_info_seq_show,
};
static int bond_info_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
struct proc_dir_entry *proc;
int res;
res = seq_open(file, &bond_info_seq_ops);
if (!res) {
/* recover the pointer buried in proc_dir_entry data */
seq = file->private_data;
proc = PDE(inode);
seq->private = proc->data;
}
return res;
}
static const struct file_operations bond_info_fops = {
.owner = THIS_MODULE,
.open = bond_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
void bond_create_proc_entry(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
if (bn->proc_dir) {
bond->proc_entry = proc_create_data(bond_dev->name,
S_IRUGO, bn->proc_dir,
&bond_info_fops, bond);
if (bond->proc_entry == NULL)
pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
DRV_NAME, bond_dev->name);
else
memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
}
}
void bond_remove_proc_entry(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
if (bn->proc_dir && bond->proc_entry) {
remove_proc_entry(bond->proc_file_name, bn->proc_dir);
memset(bond->proc_file_name, 0, IFNAMSIZ);
bond->proc_entry = NULL;
}
}
/* Create the bonding directory under /proc/net, if it doesn't exist yet.
* Caller must hold rtnl_lock.
*/
void __net_init bond_create_proc_dir(struct bond_net *bn)
{
if (!bn->proc_dir) {
bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
if (!bn->proc_dir)
pr_warning("Warning: cannot create /proc/net/%s\n",
DRV_NAME);
}
}
/* Destroy the bonding directory under /proc/net, if empty.
* Caller must hold rtnl_lock.
*/
void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
{
if (bn->proc_dir) {
remove_proc_entry(DRV_NAME, bn->net->proc_net);
bn->proc_dir = NULL;
}
}

View file

@ -118,7 +118,10 @@ static ssize_t bonding_store_bonds(struct class *cls,
pr_info("%s is being created...\n", ifname);
rv = bond_create(net, ifname);
if (rv) {
pr_info("Bond creation failed.\n");
if (rv == -EEXIST)
pr_info("%s already exists.\n", ifname);
else
pr_info("%s creation failed.\n", ifname);
res = rv;
}
} else if (command[0] == '-') {
@ -322,11 +325,6 @@ static ssize_t bonding_store_mode(struct device *d,
ret = -EINVAL;
goto out;
}
if (bond->params.mode == BOND_MODE_8023AD)
bond_unset_master_3ad_flags(bond);
if (bond->params.mode == BOND_MODE_ALB)
bond_unset_master_alb_flags(bond);
bond->params.mode = new_value;
bond_set_mode_ops(bond, bond->params.mode);
@ -527,8 +525,6 @@ static ssize_t bonding_store_arp_interval(struct device *d,
pr_info("%s: Setting ARP monitoring interval to %d.\n",
bond->dev->name, new_value);
bond->params.arp_interval = new_value;
if (bond->params.arp_interval)
bond->dev->priv_flags |= IFF_MASTER_ARPMON;
if (bond->params.miimon) {
pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
bond->dev->name, bond->dev->name);
@ -1004,7 +1000,6 @@ static ssize_t bonding_store_miimon(struct device *d,
pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
bond->dev->name);
bond->params.arp_interval = 0;
bond->dev->priv_flags &= ~IFF_MASTER_ARPMON;
if (bond->params.arp_validate) {
bond_unregister_arp(bond);
bond->params.arp_validate =
@ -1198,7 +1193,7 @@ static ssize_t bonding_store_carrier(struct device *d,
bond->dev->name, new_value);
}
out:
return count;
return ret;
}
static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
bonding_show_carrier, bonding_store_carrier);
@ -1587,15 +1582,15 @@ static ssize_t bonding_store_slaves_active(struct device *d,
}
bond_for_each_slave(bond, slave, i) {
if (slave->state == BOND_STATE_BACKUP) {
if (!bond_is_active_slave(slave)) {
if (new_value)
slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE;
slave->inactive = 0;
else
slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
slave->inactive = 1;
}
}
out:
return count;
return ret;
}
static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
bonding_show_slaves_active, bonding_store_slaves_active);

View file

@ -20,6 +20,7 @@
#include <linux/if_bonding.h>
#include <linux/cpumask.h>
#include <linux/in6.h>
#include <linux/netpoll.h>
#include "bond_3ad.h"
#include "bond_alb.h"
@ -28,6 +29,8 @@
#define DRV_NAME "bonding"
#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
#define BOND_MAX_ARP_TARGETS 16
#define IS_UP(dev) \
@ -52,7 +55,7 @@
(((slave)->dev->flags & IFF_UP) && \
netif_running((slave)->dev) && \
((slave)->link == BOND_LINK_UP) && \
((slave)->state == BOND_STATE_ACTIVE))
bond_is_active_slave(slave))
#define USES_PRIMARY(mode) \
@ -132,7 +135,7 @@ static inline void unblock_netpoll_tx(void)
static inline int is_netpoll_tx_blocked(struct net_device *dev)
{
if (unlikely(dev->priv_flags & IFF_IN_NETPOLL))
if (unlikely(netpoll_tx_running(dev)))
return atomic_read(&netpoll_block_tx);
return 0;
}
@ -189,7 +192,9 @@ struct slave {
unsigned long last_arp_rx;
s8 link; /* one of BOND_LINK_XXXX */
s8 new_link;
s8 state; /* one of BOND_STATE_XXXX */
u8 backup:1, /* indicates backup slave. Value corresponds with
BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
inactive:1; /* indicates inactive slave */
u32 original_mtu;
u32 link_failure_count;
u8 perm_hwaddr[ETH_ALEN];
@ -198,6 +203,9 @@ struct slave {
u16 queue_id;
struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
struct tlb_slave_info tlb_info;
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *np;
#endif
};
/*
@ -260,12 +268,16 @@ struct bonding {
#endif /* CONFIG_DEBUG_FS */
};
#define bond_slave_get_rcu(dev) \
((struct slave *) rcu_dereference(dev->rx_handler_data))
/**
* Returns NULL if the net_device does not belong to any of the bond's slaves
*
* Caller must hold bond lock for read
*/
static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev)
static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
struct net_device *slave_dev)
{
struct slave *slave = NULL;
int i;
@ -276,7 +288,7 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n
}
}
return 0;
return NULL;
}
static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@ -294,6 +306,26 @@ static inline bool bond_is_lb(const struct bonding *bond)
bond->params.mode == BOND_MODE_ALB);
}
static inline void bond_set_active_slave(struct slave *slave)
{
slave->backup = 0;
}
static inline void bond_set_backup_slave(struct slave *slave)
{
slave->backup = 1;
}
static inline int bond_slave_state(struct slave *slave)
{
return slave->backup;
}
static inline bool bond_is_active_slave(struct slave *slave)
{
return !bond_slave_state(slave);
}
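/* Mapping sketch (editorial): the old tri-state field translates to
 * the new helpers roughly as
 *
 *	slave->state == BOND_STATE_ACTIVE  ->  bond_is_active_slave(slave)
 *	slave->state = BOND_STATE_ACTIVE   ->  bond_set_active_slave(slave)
 *	slave->state = BOND_STATE_BACKUP   ->  bond_set_backup_slave(slave)
 *
 * with the state now held in the single 'backup' bit of struct slave.
 */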
#define BOND_PRI_RESELECT_ALWAYS 0
#define BOND_PRI_RESELECT_BETTER 1
#define BOND_PRI_RESELECT_FAILURE 2
@ -311,7 +343,7 @@ static inline bool bond_is_lb(const struct bonding *bond)
static inline int slave_do_arp_validate(struct bonding *bond,
struct slave *slave)
{
return bond->params.arp_validate & (1 << slave->state);
return bond->params.arp_validate & (1 << bond_slave_state(slave));
}
static inline unsigned long slave_last_rx(struct bonding *bond,
@ -323,41 +355,40 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
return slave->dev->last_rx;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void bond_netpoll_send_skb(const struct slave *slave,
struct sk_buff *skb)
{
struct netpoll *np = slave->np;
if (np)
netpoll_send_skb(np, skb);
}
#else
static inline void bond_netpoll_send_skb(const struct slave *slave,
struct sk_buff *skb)
{
}
#endif
static inline void bond_set_slave_inactive_flags(struct slave *slave)
{
struct bonding *bond = netdev_priv(slave->dev->master);
if (!bond_is_lb(bond))
slave->state = BOND_STATE_BACKUP;
bond_set_backup_slave(slave);
if (!bond->params.all_slaves_active)
slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
if (slave_do_arp_validate(bond, slave))
slave->dev->priv_flags |= IFF_SLAVE_NEEDARP;
slave->inactive = 1;
}
static inline void bond_set_slave_active_flags(struct slave *slave)
{
slave->state = BOND_STATE_ACTIVE;
slave->dev->priv_flags &= ~(IFF_SLAVE_INACTIVE | IFF_SLAVE_NEEDARP);
bond_set_active_slave(slave);
slave->inactive = 0;
}
static inline void bond_set_master_3ad_flags(struct bonding *bond)
static inline bool bond_is_slave_inactive(struct slave *slave)
{
bond->dev->priv_flags |= IFF_MASTER_8023AD;
}
static inline void bond_unset_master_3ad_flags(struct bonding *bond)
{
bond->dev->priv_flags &= ~IFF_MASTER_8023AD;
}
static inline void bond_set_master_alb_flags(struct bonding *bond)
{
bond->dev->priv_flags |= IFF_MASTER_ALB;
}
static inline void bond_unset_master_alb_flags(struct bonding *bond)
{
bond->dev->priv_flags &= ~IFF_MASTER_ALB;
return slave->inactive;
}
struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
@ -393,6 +424,30 @@ struct bond_net {
#endif
};
#ifdef CONFIG_PROC_FS
void bond_create_proc_entry(struct bonding *bond);
void bond_remove_proc_entry(struct bonding *bond);
void bond_create_proc_dir(struct bond_net *bn);
void bond_destroy_proc_dir(struct bond_net *bn);
#else
static inline void bond_create_proc_entry(struct bonding *bond)
{
}
static inline void bond_remove_proc_entry(struct bonding *bond)
{
}
static inline void bond_create_proc_dir(struct bond_net *bn)
{
}
static inline void bond_destroy_proc_dir(struct bond_net *bn)
{
}
#endif
/* exported from bond_main.c */
extern int bond_net_id;
extern const struct bond_parm_tbl bond_lacp_tbl[];

View file

@ -115,6 +115,8 @@ source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/usb/Kconfig"
source "drivers/net/can/softing/Kconfig"

View file

@ -13,6 +13,7 @@ obj-y += softing/
obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_MSCAN) += mscan/
obj-$(CONFIG_CAN_C_CAN) += c_can/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o

View file

@ -0,0 +1,15 @@
menuconfig CAN_C_CAN
tristate "Bosch C_CAN devices"
depends on CAN_DEV && HAS_IOMEM
if CAN_C_CAN
config CAN_C_CAN_PLATFORM
tristate "Generic Platform Bus based C_CAN driver"
---help---
This driver adds support for the C_CAN chips connected to
the "platform bus" (the Linux abstraction for devices
attached directly to the processor), which can be found on
various boards from ST Microelectronics (http://www.st.com),
such as the SPEAr1310 and SPEAr320 evaluation boards.
endif

View file

@ -0,0 +1,8 @@
#
# Makefile for the Bosch C_CAN controller drivers.
#
obj-$(CONFIG_CAN_C_CAN) += c_can.o
obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG

File diff suppressed because it is too large

View file

@ -0,0 +1,86 @@
/*
* CAN bus driver for Bosch C_CAN controller
*
* Copyright (C) 2010 ST Microelectronics
* Bhupesh Sharma <bhupesh.sharma@st.com>
*
* Borrowed heavily from the C_CAN driver originally written by:
* Copyright (C) 2007
* - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
* - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
*
* Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
* Bosch C_CAN user manual can be obtained from:
* http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
* users_manual_c_can.pdf
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#ifndef C_CAN_H
#define C_CAN_H
/* c_can IF registers */
struct c_can_if_regs {
u16 com_req;
u16 com_mask;
u16 mask1;
u16 mask2;
u16 arb1;
u16 arb2;
u16 msg_cntrl;
u16 data[4];
u16 _reserved[13];
};
/* c_can hardware registers */
struct c_can_regs {
u16 control;
u16 status;
u16 err_cnt;
u16 btr;
u16 interrupt;
u16 test;
u16 brp_ext;
u16 _reserved1;
struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */
u16 _reserved2[8];
u16 txrqst1;
u16 txrqst2;
u16 _reserved3[6];
u16 newdat1;
u16 newdat2;
u16 _reserved4[6];
u16 intpnd1;
u16 intpnd2;
u16 _reserved5[6];
u16 msgval1;
u16 msgval2;
u16 _reserved6[6];
};
/* c_can private data structure */
struct c_can_priv {
struct can_priv can; /* must be the first member */
struct napi_struct napi;
struct net_device *dev;
int tx_object;
int current_status;
int last_status;
u16 (*read_reg) (struct c_can_priv *priv, void *reg);
void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val);
struct c_can_regs __iomem *regs;
unsigned long irq_flags; /* for request_irq() */
unsigned int tx_next;
unsigned int tx_echo;
void *priv; /* for board-specific data */
};
struct net_device *alloc_c_can_dev(void);
void free_c_can_dev(struct net_device *dev);
int register_c_can_dev(struct net_device *dev);
void unregister_c_can_dev(struct net_device *dev);
#endif /* C_CAN_H */

View file

@ -0,0 +1,215 @@
/*
* Platform CAN bus driver for Bosch C_CAN controller
*
* Copyright (C) 2010 ST Microelectronics
* Bhupesh Sharma <bhupesh.sharma@st.com>
*
* Borrowed heavily from the C_CAN driver originally written by:
* Copyright (C) 2007
* - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
* - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
*
* Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
* Bosch C_CAN user manual can be obtained from:
* http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
* users_manual_c_can.pdf
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/can/dev.h>
#include "c_can.h"
/*
* The 16-bit c_can registers can be arranged differently in the memory
* architecture of different implementations: for example, they may be
* aligned to a 16-bit or to a 32-bit boundary. Handle this by providing
* a common read/write interface.
*/
static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
void *reg)
{
return readw(reg);
}
static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
void *reg, u16 val)
{
writew(val, reg);
}
static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
void *reg)
{
return readw(reg + (long)reg - (long)priv->regs);
}
static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
void *reg, u16 val)
{
writew(val, reg + (long)reg - (long)priv->regs);
}
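/* Illustration of the 32-bit-aligned arithmetic (editorial sketch):
 * for a register at struct offset 4, (long)reg - (long)priv->regs == 4,
 * so the access lands at priv->regs + 8 -- the offset from the base is
 * doubled, matching a layout in which every 16-bit register occupies a
 * 32-bit slot.
 */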
static int __devinit c_can_plat_probe(struct platform_device *pdev)
{
int ret;
void __iomem *addr;
struct net_device *dev;
struct c_can_priv *priv;
struct resource *mem, *irq;
#ifdef CONFIG_HAVE_CLK
struct clk *clk;
/* get the appropriate clk */
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "no clock defined\n");
ret = -ENODEV;
goto exit;
}
#endif
/* get the platform data */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!mem || (irq <= 0)) {
ret = -ENODEV;
goto exit_free_clk;
}
if (!request_mem_region(mem->start, resource_size(mem),
KBUILD_MODNAME)) {
dev_err(&pdev->dev, "resource unavailable\n");
ret = -ENODEV;
goto exit_free_clk;
}
addr = ioremap(mem->start, resource_size(mem));
if (!addr) {
dev_err(&pdev->dev, "failed to map can port\n");
ret = -ENOMEM;
goto exit_release_mem;
}
/* allocate the c_can device */
dev = alloc_c_can_dev();
if (!dev) {
ret = -ENOMEM;
goto exit_iounmap;
}
priv = netdev_priv(dev);
dev->irq = irq->start;
priv->regs = addr;
#ifdef CONFIG_HAVE_CLK
priv->can.clock.freq = clk_get_rate(clk);
priv->priv = clk;
#endif
switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
case IORESOURCE_MEM_32BIT:
priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
break;
case IORESOURCE_MEM_16BIT:
default:
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
break;
}
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
ret = register_c_can_dev(dev);
if (ret) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
KBUILD_MODNAME, ret);
goto exit_free_device;
}
dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
KBUILD_MODNAME, priv->regs, dev->irq);
return 0;
exit_free_device:
platform_set_drvdata(pdev, NULL);
free_c_can_dev(dev);
exit_iounmap:
iounmap(addr);
exit_release_mem:
release_mem_region(mem->start, resource_size(mem));
exit_free_clk:
#ifdef CONFIG_HAVE_CLK
clk_put(clk);
exit:
#endif
dev_err(&pdev->dev, "probe failed\n");
return ret;
}
static int __devexit c_can_plat_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct c_can_priv *priv = netdev_priv(dev);
struct resource *mem;
unregister_c_can_dev(dev);
platform_set_drvdata(pdev, NULL);
free_c_can_dev(dev);
iounmap(priv->regs);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
#ifdef CONFIG_HAVE_CLK
clk_put(priv->priv);
#endif
return 0;
}
static struct platform_driver c_can_plat_driver = {
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
.probe = c_can_plat_probe,
.remove = __devexit_p(c_can_plat_remove),
};
static int __init c_can_plat_init(void)
{
return platform_driver_register(&c_can_plat_driver);
}
module_init(c_can_plat_init);
static void __exit c_can_plat_exit(void)
{
platform_driver_unregister(&c_can_plat_driver);
}
module_exit(c_can_plat_exit);
MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Platform CAN bus driver for Bosch C_CAN controller");

View file

@ -659,7 +659,7 @@ failed:
static void unlink_all_urbs(struct esd_usb2 *dev)
{
struct esd_usb2_net_priv *priv;
int i;
int i, j;
usb_kill_anchored_urbs(&dev->rx_submitted);
for (i = 0; i < dev->net_count; i++) {
@ -668,8 +668,8 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
usb_kill_anchored_urbs(&priv->tx_submitted);
atomic_set(&priv->active_tx_jobs, 0);
for (i = 0; i < MAX_TX_URBS; i++)
priv->tx_contexts[i].echo_index = MAX_TX_URBS;
for (j = 0; j < MAX_TX_URBS; j++)
priv->tx_contexts[j].echo_index = MAX_TX_URBS;
}
}
}
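/* The fix above is a loop-variable aliasing repair (editorial note):
 * the inner tx_contexts walk previously reused the outer counter 'i',
 * which reset the iteration over dev->net_count after the first
 * device; giving the inner loop its own counter 'j' restores the walk
 * over all interfaces.
 */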

View file

@ -65,7 +65,14 @@ static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);
static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
return rcu_dereference_protected(cnic_ulp_tbl[type],
lockdep_is_held(&cnic_lock));
}
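/* Usage sketch (illustrative): with cnic_lock held, reads go through
 * the helper instead of a bare pointer load, e.g.
 *
 *	mutex_lock(&cnic_lock);
 *	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
 *
 * rcu_dereference_protected() tells lockdep that the update-side lock
 * is held, instead of requiring an RCU read-side critical section.
 */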
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
@ -435,7 +442,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (cnic_ulp_tbl[ulp_type]) {
if (cnic_ulp_tbl_prot(ulp_type)) {
pr_err("%s: Type %d has already been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
@ -478,7 +485,7 @@ int cnic_unregister_driver(int ulp_type)
return -EINVAL;
}
mutex_lock(&cnic_lock);
ulp_ops = cnic_ulp_tbl[ulp_type];
ulp_ops = cnic_ulp_tbl_prot(ulp_type);
if (!ulp_ops) {
pr_err("%s: Type %d has not been registered\n",
__func__, ulp_type);
@ -529,7 +536,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (cnic_ulp_tbl[ulp_type] == NULL) {
if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
pr_err("%s: Driver with type %d has not been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
@ -544,7 +551,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
cp->ulp_handle[ulp_type] = ulp_ctx;
ulp_ops = cnic_ulp_tbl[ulp_type];
ulp_ops = cnic_ulp_tbl_prot(ulp_type);
rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
cnic_hold(dev);
@ -2970,7 +2977,8 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
ulp_ops = cp->ulp_ops[if_type];
ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
lockdep_is_held(&cnic_lock));
if (!ulp_ops) {
mutex_unlock(&cnic_lock);
continue;
@ -2994,7 +3002,8 @@ static void cnic_ulp_start(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
ulp_ops = cp->ulp_ops[if_type];
ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
lockdep_is_held(&cnic_lock));
if (!ulp_ops || !ulp_ops->cnic_start) {
mutex_unlock(&cnic_lock);
continue;
@ -3058,7 +3067,7 @@ static void cnic_ulp_init(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
ulp_ops = cnic_ulp_tbl[i];
ulp_ops = cnic_ulp_tbl_prot(i);
if (!ulp_ops || !ulp_ops->cnic_init) {
mutex_unlock(&cnic_lock);
continue;
@ -3082,7 +3091,7 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
ulp_ops = cnic_ulp_tbl[i];
ulp_ops = cnic_ulp_tbl_prot(i);
if (!ulp_ops || !ulp_ops->cnic_exit) {
mutex_unlock(&cnic_lock);
continue;
@ -3398,17 +3407,14 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
struct dst_entry **dst)
{
#if defined(CONFIG_INET)
struct flowi fl;
int err;
struct rtable *rt;
memset(&fl, 0, sizeof(fl));
fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
err = ip_route_output_key(&init_net, &rt, &fl);
if (!err)
rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
if (!IS_ERR(rt)) {
*dst = &rt->dst;
return err;
return 0;
}
return PTR_ERR(rt);
#else
return -ENETUNREACH;
#endif
@ -3418,14 +3424,14 @@ static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
struct flowi fl;
struct flowi6 fl6;
memset(&fl, 0, sizeof(fl));
ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
fl.oif = dst_addr->sin6_scope_id;
memset(&fl6, 0, sizeof(fl6));
ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = dst_addr->sin6_scope_id;
*dst = ip6_route_output(&init_net, NULL, &fl);
*dst = ip6_route_output(&init_net, NULL, &fl6);
if (*dst)
return 0;
#endif
@ -4187,6 +4193,14 @@ static void cnic_enable_bnx2_int(struct cnic_dev *dev)
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
{
u32 max_conn;
max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
dev->max_iscsi_conn = max_conn;
}
static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
@ -4511,6 +4525,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
return err;
}
cnic_get_bnx2_iscsi_info(dev);
return 0;
}
@ -4722,129 +4738,6 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
cp->rx_cons = *cp->rx_cons_ptr;
}
static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
u32 lower_addr)
{
u32 val;
u8 mac[6];
val = CNIC_RD(dev, upper_addr);
mac[0] = (u8) (val >> 8);
mac[1] = (u8) val;
val = CNIC_RD(dev, lower_addr);
mac[2] = (u8) (val >> 24);
mac[3] = (u8) (val >> 16);
mac[4] = (u8) (val >> 8);
mac[5] = (u8) val;
if (is_valid_ether_addr(mac)) {
memcpy(dev->mac_addr, mac, 6);
return 0;
} else {
return -EINVAL;
}
}
static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u32 base, base2, addr, addr1, val;
int port = CNIC_PORT(cp);
dev->max_iscsi_conn = 0;
base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
if (base == 0)
return;
base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
MISC_REG_GENERIC_CR_0));
addr = BNX2X_SHMEM_ADDR(base,
dev_info.port_hw_config[port].iscsi_mac_upper);
addr1 = BNX2X_SHMEM_ADDR(base,
dev_info.port_hw_config[port].iscsi_mac_lower);
cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);
addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
val = CNIC_RD(dev, addr);
if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
u16 val16;
addr = BNX2X_SHMEM_ADDR(base,
drv_lic_key[port].max_iscsi_init_conn);
val16 = CNIC_RD16(dev, addr);
if (val16)
val16 ^= 0x1e1e;
dev->max_iscsi_conn = val16;
}
if (BNX2X_CHIP_IS_E2(cp->chip_id))
dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
int func = CNIC_FUNC(cp);
u32 mf_cfg_addr;
if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
mf_cfg_addr));
else
mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
/* Must determine if the MF is SD vs SI mode */
addr = BNX2X_SHMEM_ADDR(base,
dev_info.shared_feature_config.config);
val = CNIC_RD(dev, addr);
if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
int rc;
/* MULTI_FUNCTION_SI mode */
addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
func_ext_config[func].func_cfg);
val = CNIC_RD(dev, addr);
if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
dev->max_iscsi_conn = 0;
if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
dev->max_fcoe_conn = 0;
addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
func_ext_config[func].
iscsi_mac_addr_upper);
addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
func_ext_config[func].
iscsi_mac_addr_lower);
rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
addr1);
if (rc && func > 1)
dev->max_iscsi_conn = 0;
return;
}
}
addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
func_mf_config[func].e1hov_tag);
val = CNIC_RD(dev, addr);
val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
dev->max_fcoe_conn = 0;
dev->max_iscsi_conn = 0;
}
}
if (!is_valid_ether_addr(dev->mac_addr))
dev->max_iscsi_conn = 0;
}
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
@ -4926,8 +4819,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
cnic_init_bnx2x_kcq(dev);
cnic_get_bnx2x_iscsi_info(dev);
/* Only 1 EQ */
CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
@ -5281,15 +5172,11 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
dev_hold(dev);
pci_dev_get(pdev);
if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
pdev->device == PCI_DEVICE_ID_NX2_5709S) {
u8 rev;
pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
if (rev < 0x10) {
pci_dev_put(pdev);
goto cnic_err;
}
if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
(pdev->revision < 0x10)) {
pci_dev_put(pdev);
goto cnic_err;
}
pci_dev_put(pdev);
@ -5360,6 +5247,14 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cdev->pcidev = pdev;
cp->chip_id = ethdev->chip_id;
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
!(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
cp->cnic_ops = &cnic_bnx2x_ops;
cp->start_hw = cnic_start_bnx2x_hw;
cp->stop_hw = cnic_stop_bnx2x_hw;

View file

@ -220,7 +220,7 @@ struct cnic_local {
#define ULP_F_INIT 0
#define ULP_F_START 1
#define ULP_F_CALL_PENDING 2
struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
unsigned long cnic_local_flags;
#define CNIC_LCL_FL_KWQ_INIT 0x0
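The __rcu annotation added above lets sparse verify that every access to ulp_ops goes through the RCU accessors. A minimal sketch of the access pattern this implies (illustrative fragment, not code quoted from cnic; the callback name is hypothetical):

/* Reader side: dereference only under rcu_read_lock(). */
struct cnic_ulp_ops *ops;

rcu_read_lock();
ops = rcu_dereference(cp->ulp_ops[ulp_type]);
if (ops)
        ops->cnic_init(dev);
rcu_read_unlock();

/* Writer side: publish with rcu_assign_pointer(); free the old
 * ops only after synchronize_rcu() guarantees no reader sees it. */
rcu_assign_pointer(cp->ulp_ops[ulp_type], new_ops);
synchronize_rcu();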

View file

@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
#define CNIC_MODULE_VERSION "2.2.12"
#define CNIC_MODULE_RELDATE "Jan 03, 2011"
#define CNIC_MODULE_VERSION "2.2.13"
#define CNIC_MODULE_RELDATE "Jan 31, 2011"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@ -159,6 +159,9 @@ struct cnic_eth_dev {
u32 drv_state;
#define CNIC_DRV_STATE_REGD 0x00000001
#define CNIC_DRV_STATE_USING_MSIX 0x00000002
#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004
#define CNIC_DRV_STATE_NO_ISCSI 0x00000008
#define CNIC_DRV_STATE_NO_FCOE 0x00000010
u32 chip_id;
u32 max_kwqe_pending;
struct pci_dev *pdev;
@ -176,6 +179,7 @@ struct cnic_eth_dev {
u32 fcoe_init_cid;
u16 iscsi_l2_client_id;
u16 iscsi_l2_cid;
u8 iscsi_mac[ETH_ALEN];
int num_irq;
struct cnic_irq irq_arr[MAX_CNIC_VEC];

View file

@ -95,6 +95,9 @@
Dmitry Pervushin : dpervushin@ru.mvista.com
: PNX010X platform support
Domenico Andreoli : cavokz@gmail.com
: QQ2440 platform support
*/
/* Always include 'config.h' first in case the user wants to turn on
@ -176,6 +179,10 @@ static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0};
#elif defined(CONFIG_ARCH_IXDP2X01)
static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
#elif defined(CONFIG_MACH_QQ2440)
#include <mach/qq2440.h>
static unsigned int netcard_portlist[] __used __initdata = { QQ2440_CS8900_VIRT_BASE + 0x300, 0 };
static unsigned int cs8900_irq_map[] = { QQ2440_CS8900_IRQ, 0, 0, 0 };
#elif defined(CONFIG_MACH_MX31ADS)
#include <mach/board-mx31ads.h>
static unsigned int netcard_portlist[] __used __initdata = {
@ -521,6 +528,10 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
#endif
lp->force = g_cs89x0_media__force;
#endif
#if defined(CONFIG_MACH_QQ2440)
lp->force |= FORCE_RJ45 | FORCE_FULL;
#endif
}
/* Grab the region so we can find another board if autoIRQ fails. */
@ -943,10 +954,10 @@ skip_this_frame:
static void __init reset_chip(struct net_device *dev)
{
#if !defined(CONFIG_MACH_MX31ADS)
#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01)
#if !defined(CS89x0_NONISA_IRQ)
struct net_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
#endif
#endif /* CS89x0_NONISA_IRQ */
int reset_start_time;
writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
@ -954,7 +965,7 @@ static void __init reset_chip(struct net_device *dev)
/* wait 30 ms */
msleep(30);
#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01)
#if !defined(CS89x0_NONISA_IRQ)
if (lp->chip_type != CS8900) {
/* Hardware problem requires PNP registers to be reconfigured after a reset */
writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT);
@ -965,7 +976,7 @@ static void __init reset_chip(struct net_device *dev)
outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT);
outb((dev->mem_start >> 8) & 0xff, ioaddr + DATA_PORT + 1);
}
#endif /* IXDP2x01 */
#endif /* CS89x0_NONISA_IRQ */
/* Wait until the chip is reset */
reset_start_time = jiffies;

View file

@ -186,9 +186,10 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
dev = NULL;
if (grp)
dev = vlan_group_get_device(grp, vlan);
} else
} else if (netif_is_bond_slave(dev)) {
while (dev->master)
dev = dev->master;
}
return dev;
}
}
@ -967,8 +968,6 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
cxgb_neigh_update((struct neighbour *)ctx);
break;
}
case (NETEVENT_PMTU_UPDATE):
break;
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
cxgb_redirect(nr->old, nr->new);

View file

@ -2471,7 +2471,6 @@ static int netevent_cb(struct notifier_block *nb, unsigned long event,
case NETEVENT_NEIGH_UPDATE:
check_neigh_update(data);
break;
case NETEVENT_PMTU_UPDATE:
case NETEVENT_REDIRECT:
default:
break;

View file

@ -1730,7 +1730,7 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
emac_read(EMAC_TXCARRIERSENSE);
emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
ndev->stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN);
ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN);
emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
return &ndev->stats;

View file

@ -1593,10 +1593,15 @@ dm9000_probe(struct platform_device *pdev)
ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
}
if (!is_valid_ether_addr(ndev->dev_addr))
if (!is_valid_ether_addr(ndev->dev_addr)) {
dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
"set using ifconfig\n", ndev->name);
random_ether_addr(ndev->dev_addr);
mac_src = "random";
}
platform_set_drvdata(pdev, ndev);
ret = register_netdev(ndev);

View file

@ -86,6 +86,7 @@
#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
#define E1000_CTRL_EXT_LSECCK 0x00001000
#define E1000_CTRL_EXT_PHYPDEN 0x00100000
/* Receive Descriptor bit definitions */

View file

@ -364,6 +364,7 @@ struct e1000_adapter {
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
spinlock_t stats64_lock;
struct e1000_hw_stats stats;
struct e1000_phy_info phy_info;
struct e1000_phy_stats phy_stats;
@ -494,7 +495,9 @@ extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_update_stats(struct e1000_adapter *adapter);
extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64
*stats);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_get_hw_control(struct e1000_adapter *adapter);

View file

@ -46,15 +46,15 @@ struct e1000_stats {
};
#define E1000_STAT(str, m) { \
.stat_string = str, \
.type = E1000_STATS, \
.sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
.stat_offset = offsetof(struct e1000_adapter, m) }
.stat_string = str, \
.type = E1000_STATS, \
.sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
.stat_offset = offsetof(struct e1000_adapter, m) }
#define E1000_NETDEV_STAT(str, m) { \
.stat_string = str, \
.type = NETDEV_STATS, \
.sizeof_stat = sizeof(((struct net_device *)0)->m), \
.stat_offset = offsetof(struct net_device, m) }
.stat_string = str, \
.type = NETDEV_STATS, \
.sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
.stat_offset = offsetof(struct rtnl_link_stats64, m) }
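The reworked macros record each statistic as a (type, size, offset) triple against either the adapter or the rtnl_link_stats64 snapshot. A short sketch of the generic lookup this enables, mirroring the ethtool path later in this diff (assumes net_stats has been filled by e1000e_get_stats64()):

/* Resolve table entry i against the right base object. */
char *p;

switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
        p = (char *)&net_stats + e1000_gstrings_stats[i].stat_offset;
        break;
case E1000_STATS:
        p = (char *)adapter + e1000_gstrings_stats[i].stat_offset;
        break;
}
data[i] = (e1000_gstrings_stats[i].sizeof_stat == sizeof(u64)) ?
        *(u64 *)p : *(u32 *)p;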
static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("rx_packets", stats.gprc),
@ -65,21 +65,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("tx_broadcast", stats.bptc),
E1000_STAT("rx_multicast", stats.mprc),
E1000_STAT("tx_multicast", stats.mptc),
E1000_NETDEV_STAT("rx_errors", stats.rx_errors),
E1000_NETDEV_STAT("tx_errors", stats.tx_errors),
E1000_NETDEV_STAT("tx_dropped", stats.tx_dropped),
E1000_NETDEV_STAT("rx_errors", rx_errors),
E1000_NETDEV_STAT("tx_errors", tx_errors),
E1000_NETDEV_STAT("tx_dropped", tx_dropped),
E1000_STAT("multicast", stats.mprc),
E1000_STAT("collisions", stats.colc),
E1000_NETDEV_STAT("rx_length_errors", stats.rx_length_errors),
E1000_NETDEV_STAT("rx_over_errors", stats.rx_over_errors),
E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
E1000_STAT("rx_crc_errors", stats.crcerrs),
E1000_NETDEV_STAT("rx_frame_errors", stats.rx_frame_errors),
E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
E1000_STAT("rx_no_buffer_count", stats.rnbc),
E1000_STAT("rx_missed_errors", stats.mpc),
E1000_STAT("tx_aborted_errors", stats.ecol),
E1000_STAT("tx_carrier_errors", stats.tncrs),
E1000_NETDEV_STAT("tx_fifo_errors", stats.tx_fifo_errors),
E1000_NETDEV_STAT("tx_heartbeat_errors", stats.tx_heartbeat_errors),
E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
E1000_STAT("tx_window_errors", stats.latecol),
E1000_STAT("tx_abort_late_coll", stats.latecol),
E1000_STAT("tx_deferred_ok", stats.dc),
@ -433,13 +433,11 @@ static void e1000_get_regs(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
u32 *regs_buff = p;
u16 phy_data;
u8 revision_id;
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id);
regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
adapter->pdev->device;
regs_buff[0] = er32(CTRL);
regs_buff[1] = er32(STATUS);
@ -684,20 +682,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
rx_old = adapter->rx_ring;
err = -ENOMEM;
tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
if (!tx_ring)
goto err_alloc_tx;
/*
* use a memcpy to save any previously configured
* items like napi structs from having to be
* reinitialized
*/
memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
if (!rx_ring)
goto err_alloc_rx;
memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
adapter->tx_ring = tx_ring;
adapter->rx_ring = rx_ring;
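The kmemdup() calls above fold the old allocate-then-copy pair into one call; a sketch of the equivalence (both forms are standard slab API):

/* Before: allocate (the zeroing is immediately overwritten), then copy. */
tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
if (tx_ring)
        memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));

/* After: one call, same result, no redundant memset. */
tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);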
@ -1255,7 +1246,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_reg = 0;
u32 stat_reg = 0;
u16 phy_reg = 0;
s32 ret_val = 0;
@ -1363,8 +1353,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
* Set the ILOS bit on the fiber Nic if half duplex link is
* detected.
*/
stat_reg = er32(STATUS);
if ((stat_reg & E1000_STATUS_FD) == 0)
if ((er32(STATUS) & E1000_STATUS_FD) == 0)
ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
}
@ -1677,10 +1666,13 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
} else {
hw->mac.ops.check_for_link(hw);
if (hw->mac.autoneg)
msleep(4000);
/*
* On some Phy/switch combinations, link establishment
* can take a few seconds more than expected.
*/
msleep(5000);
if (!(er32(STATUS) &
E1000_STATUS_LU))
if (!(er32(STATUS) & E1000_STATUS_LU))
*data = 1;
}
return *data;
@ -1807,8 +1799,7 @@ static void e1000_get_wol(struct net_device *netdev,
return;
wol->supported = WAKE_UCAST | WAKE_MCAST |
WAKE_BCAST | WAKE_MAGIC |
WAKE_PHY | WAKE_ARP;
WAKE_BCAST | WAKE_MAGIC | WAKE_PHY;
/* apply any specific unsupported masks here */
if (adapter->flags & FLAG_NO_WAKE_UCAST) {
@ -1829,19 +1820,16 @@ static void e1000_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_MAGIC;
if (adapter->wol & E1000_WUFC_LNKC)
wol->wolopts |= WAKE_PHY;
if (adapter->wol & E1000_WUFC_ARP)
wol->wolopts |= WAKE_ARP;
}
static int e1000_set_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
if (!(adapter->flags & FLAG_HAS_WOL) ||
!device_can_wakeup(&adapter->pdev->dev) ||
(wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
WAKE_MAGIC | WAKE_PHY | WAKE_ARP)))
WAKE_MAGIC | WAKE_PHY)))
return -EOPNOTSUPP;
/* these settings will always override what we currently have */
@ -1857,8 +1845,6 @@ static int e1000_set_wol(struct net_device *netdev,
adapter->wol |= E1000_WUFC_MAG;
if (wol->wolopts & WAKE_PHY)
adapter->wol |= E1000_WUFC_LNKC;
if (wol->wolopts & WAKE_ARP)
adapter->wol |= E1000_WUFC_ARP;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
@ -1972,8 +1958,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
static int e1000_nway_reset(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev))
e1000e_reinit_locked(adapter);
if (!netif_running(netdev))
return -EAGAIN;
if (!adapter->hw.mac.autoneg)
return -EINVAL;
e1000e_reinit_locked(adapter);
return 0;
}
@ -1982,14 +1975,15 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct rtnl_link_stats64 net_stats;
int i;
char *p = NULL;
e1000e_update_stats(adapter);
e1000e_get_stats64(netdev, &net_stats);
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
p = (char *) netdev +
p = (char *) &net_stats +
e1000_gstrings_stats[i].stat_offset;
break;
case E1000_STATS:
@ -2014,7 +2008,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test));
memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
break;
case ETH_SS_STATS:
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {

View file

@ -812,9 +812,8 @@ struct e1000_nvm_operations {
struct e1000_mac_info {
struct e1000_mac_operations ops;
u8 addr[6];
u8 perm_addr[6];
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
enum e1000_mac_type type;

View file

@ -140,6 +140,11 @@
#define I82579_LPI_CTRL PHY_REG(772, 20)
#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
/* EMI Registers */
#define I82579_EMI_ADDR 0x10
#define I82579_EMI_DATA 0x11
#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
/* Strapping Option Register - RO */
#define E1000_STRAP 0x0000C
#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
@ -302,9 +307,9 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
* the interconnect to PCIe mode.
*/
fwsm = er32(FWSM);
if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) {
ctrl = er32(CTRL);
ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
ew32(CTRL, ctrl);
udelay(10);
@ -331,7 +336,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
goto out;
/* Ungate automatic PHY configuration on non-managed 82579 */
if ((hw->mac.type == e1000_pch2lan) &&
if ((hw->mac.type == e1000_pch2lan) &&
!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
msleep(10);
e1000_gate_hw_phy_config_ich8lan(hw, false);
@ -366,7 +371,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_phy_82579:
phy->ops.check_polarity = e1000_check_polarity_82577;
phy->ops.force_speed_duplex =
e1000_phy_force_speed_duplex_82577;
e1000_phy_force_speed_duplex_82577;
phy->ops.get_cable_length = e1000_get_cable_length_82577;
phy->ops.get_info = e1000_get_phy_info_82577;
phy->ops.commit = e1000e_phy_sw_reset;
@ -753,7 +758,13 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
if (rc)
return rc;
if (adapter->hw.phy.type == e1000_phy_ife) {
/*
* Disable Jumbo Frame support on parts with Intel 10/100 PHY or
* on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
*/
if ((adapter->hw.phy.type == e1000_phy_ife) ||
((adapter->hw.mac.type >= e1000_pch2lan) &&
(!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
}
@ -1723,11 +1734,25 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
/* Configure the LCD with the OEM bits in NVM */
ret_val = e1000_oem_bits_config_ich8lan(hw, true);
/* Ungate automatic PHY configuration on non-managed 82579 */
if ((hw->mac.type == e1000_pch2lan) &&
!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
msleep(10);
e1000_gate_hw_phy_config_ich8lan(hw, false);
if (hw->mac.type == e1000_pch2lan) {
/* Ungate automatic PHY configuration on non-managed 82579 */
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
msleep(10);
e1000_gate_hw_phy_config_ich8lan(hw, false);
}
/* Set EEE LPI Update Timer to 200usec */
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
goto out;
ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
I82579_LPI_UPDATE_TIMER);
if (ret_val)
goto release;
ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
0x1387);
release:
hw->phy.ops.release(hw);
}
out:
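The 82579 reaches its EEE registers through an indirect EMI window: write the target register number to I82579_EMI_ADDR, then the value to I82579_EMI_DATA, with the PHY lock held, exactly as the hunk above does inline. A sketch of that pattern factored into a helper (the helper name is hypothetical here):

static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
        s32 ret_val;

        /* Caller must already hold the PHY lock (phy.ops.acquire). */
        ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, addr);
        if (ret_val)
                return ret_val;

        return hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, data);
}

With such a helper, the timer setup above reduces to e1000_write_emi_reg_locked(hw, I82579_LPI_UPDATE_TIMER, 0x1387).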
@ -2104,7 +2129,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
union ich8_hws_flash_status hsfsts;
s32 ret_val = -E1000_ERR_NVM;
s32 i = 0;
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
@ -2140,6 +2164,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
ret_val = 0;
} else {
s32 i = 0;
/*
* Otherwise poll for some time so the current
* cycle has a chance to end before giving up.

View file

@ -1978,15 +1978,15 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = er32(EECD);
u16 timeout = 0;
u8 spi_stat_reg;
if (nvm->type == e1000_nvm_eeprom_spi) {
u16 timeout = NVM_MAX_RETRY_SPI;
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
ew32(EECD, eecd);
udelay(1);
timeout = NVM_MAX_RETRY_SPI;
/*
* Read "Status Register" repeatedly until the LSB is cleared.

View file

@ -54,7 +54,7 @@
#define DRV_EXTRAVERSION "-k2"
#define DRV_VERSION "1.2.20" DRV_EXTRAVERSION
#define DRV_VERSION "1.3.10" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@ -900,8 +900,6 @@ next_desc:
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
netdev->stats.rx_bytes += total_rx_bytes;
netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@ -1060,8 +1058,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
}
adapter->total_tx_bytes += total_tx_bytes;
adapter->total_tx_packets += total_tx_packets;
netdev->stats.tx_bytes += total_tx_bytes;
netdev->stats.tx_packets += total_tx_packets;
return count < tx_ring->count;
}
@ -1248,8 +1244,6 @@ next_desc:
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
netdev->stats.rx_bytes += total_rx_bytes;
netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@ -1328,7 +1322,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* an error means any chain goes out the window
* too */
if (rx_ring->rx_skb_top)
dev_kfree_skb(rx_ring->rx_skb_top);
dev_kfree_skb_irq(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
goto next_desc;
}
@ -1401,7 +1395,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* eth type trans needs skb->data to point to something */
if (!pskb_may_pull(skb, ETH_HLEN)) {
e_err("pskb_may_pull failed.\n");
dev_kfree_skb(skb);
dev_kfree_skb_irq(skb);
goto next_desc;
}
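The two hunks above switch to dev_kfree_skb_irq() because this receive path can run in hard-IRQ context, where plain dev_kfree_skb() is not allowed; the _irq variant queues the skb on the softirq completion list and frees it later. This is roughly the context check dev_kfree_skb_any() performs (sketch):

if (in_irq() || irqs_disabled())
        dev_kfree_skb_irq(skb);   /* defer the free to softirq context */
else
        dev_kfree_skb(skb);       /* safe to free immediately */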
@ -1429,8 +1423,6 @@ next_desc:
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
netdev->stats.rx_bytes += total_rx_bytes;
netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@ -1857,7 +1849,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
int err = 0, vector = 0;
if (strlen(netdev->name) < (IFNAMSIZ - 5))
sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
snprintf(adapter->rx_ring->name,
sizeof(adapter->rx_ring->name) - 1,
"%s-rx-0", netdev->name);
else
memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,
@ -1870,7 +1864,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
vector++;
if (strlen(netdev->name) < (IFNAMSIZ - 5))
sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
snprintf(adapter->tx_ring->name,
sizeof(adapter->tx_ring->name) - 1,
"%s-tx-0", netdev->name);
else
memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,
@ -2734,7 +2730,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 rctl, rfctl;
u32 psrctl = 0;
u32 pages = 0;
/* Workaround Si errata on 82579 - configure jumbo frame flow */
@ -2833,6 +2828,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
adapter->rx_ps_pages = 0;
if (adapter->rx_ps_pages) {
u32 psrctl = 0;
/* Configure extra packet-split registers */
rfctl = er32(RFCTL);
rfctl |= E1000_RFCTL_EXTEN;
@ -3034,7 +3031,6 @@ static void e1000_set_multi(struct net_device *netdev)
struct netdev_hw_addr *ha;
u8 *mta_list;
u32 rctl;
int i;
/* Check for Promiscuous and All Multicast modes */
@ -3057,12 +3053,13 @@ static void e1000_set_multi(struct net_device *netdev)
ew32(RCTL, rctl);
if (!netdev_mc_empty(netdev)) {
int i = 0;
mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list)
return;
/* prepare a packed array of only addresses. */
i = 0;
netdev_for_each_mc_addr(ha, netdev)
memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
@ -3359,6 +3356,8 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
e1e_flush();
}
static void e1000e_update_stats(struct e1000_adapter *adapter);
void e1000e_down(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@ -3393,6 +3392,11 @@ void e1000e_down(struct e1000_adapter *adapter)
del_timer_sync(&adapter->phy_info_timer);
netif_carrier_off(netdev);
spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
spin_unlock(&adapter->stats64_lock);
adapter->link_speed = 0;
adapter->link_duplex = 0;
@ -3437,6 +3441,8 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
spin_lock_init(&adapter->stats64_lock);
e1000e_set_interrupt_capability(adapter);
if (e1000_alloc_queues(adapter))
@ -3918,7 +3924,7 @@ release:
* e1000e_update_stats - Update the board statistics counters
* @adapter: board private structure
**/
void e1000e_update_stats(struct e1000_adapter *adapter)
static void e1000e_update_stats(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
@ -4030,10 +4036,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_phy_regs *phy = &adapter->phy_regs;
int ret_val;
if ((er32(STATUS) & E1000_STATUS_LU) &&
(adapter->hw.phy.media_type == e1000_media_type_copper)) {
int ret_val;
ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
@ -4179,7 +4186,6 @@ static void e1000_watchdog_task(struct work_struct *work)
struct e1000_ring *tx_ring = adapter->tx_ring;
struct e1000_hw *hw = &adapter->hw;
u32 link, tctl;
int tx_pending = 0;
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@ -4320,7 +4326,9 @@ static void e1000_watchdog_task(struct work_struct *work)
}
link_up:
spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
spin_unlock(&adapter->stats64_lock);
mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
adapter->tpt_old = adapter->stats.tpt;
@ -4334,20 +4342,17 @@ link_up:
e1000e_update_adaptive(&adapter->hw);
if (!netif_carrier_ok(netdev)) {
tx_pending = (e1000_desc_unused(tx_ring) + 1 <
tx_ring->count);
if (tx_pending) {
/*
* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
* (Do the reset outside of interrupt context).
*/
schedule_work(&adapter->reset_task);
/* return immediately since reset is imminent */
return;
}
if (!netif_carrier_ok(netdev) &&
(e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
/*
* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
* (Do the reset outside of interrupt context).
*/
schedule_work(&adapter->reset_task);
/* return immediately since reset is imminent */
return;
}
/* Simple mode for Interrupt Throttle Rate (ITR) */
@ -4411,13 +4416,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
u32 cmd_length = 0;
u16 ipcse = 0, tucse, mss;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
int err;
if (!skb_is_gso(skb))
return 0;
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
return err;
}
@ -4928,16 +4933,55 @@ static void e1000_reset_task(struct work_struct *work)
}
/**
* e1000_get_stats - Get System Network Statistics
* e1000_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
* @stats: rtnl_link_stats64 pointer
*
* Returns the address of the device statistics structure.
* The statistics are actually updated from the timer callback.
**/
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
/* only return the current stats */
return &netdev->stats;
struct e1000_adapter *adapter = netdev_priv(netdev);
memset(stats, 0, sizeof(struct rtnl_link_stats64));
spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
/* Fill out the OS statistics structure */
stats->rx_bytes = adapter->stats.gorc;
stats->rx_packets = adapter->stats.gprc;
stats->tx_bytes = adapter->stats.gotc;
stats->tx_packets = adapter->stats.gptc;
stats->multicast = adapter->stats.mprc;
stats->collisions = adapter->stats.colc;
/* Rx Errors */
/*
* RLEC on some newer hardware can be incorrect so build
* our own version based on RUC and ROC
*/
stats->rx_errors = adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +
adapter->stats.cexterr;
stats->rx_length_errors = adapter->stats.ruc +
adapter->stats.roc;
stats->rx_crc_errors = adapter->stats.crcerrs;
stats->rx_frame_errors = adapter->stats.algnerrc;
stats->rx_missed_errors = adapter->stats.mpc;
/* Tx Errors */
stats->tx_errors = adapter->stats.ecol +
adapter->stats.latecol;
stats->tx_aborted_errors = adapter->stats.ecol;
stats->tx_window_errors = adapter->stats.latecol;
stats->tx_carrier_errors = adapter->stats.tncrs;
/* Tx Dropped needs to be maintained elsewhere */
spin_unlock(&adapter->stats64_lock);
return stats;
}
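Once .ndo_get_stats64 is wired up (see the netdev_ops change below), the core fills the caller-supplied buffer through this op instead of reading netdev->stats. Roughly how dev_get_stats() in net/core/dev.c of this period dispatches (simplified sketch, not quoted verbatim):

if (ops->ndo_get_stats64) {
        memset(storage, 0, sizeof(*storage));
        ops->ndo_get_stats64(dev, storage);
} else if (ops->ndo_get_stats) {
        netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
} else {
        netdev_stats_to_stats64(storage, &dev->stats);
}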
/**
@ -5507,9 +5551,10 @@ static irqreturn_t e1000_intr_msix(int irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
int vector, msix_irq;
if (adapter->msix_entries) {
int vector, msix_irq;
vector = 0;
msix_irq = adapter->msix_entries[vector].vector;
disable_irq(msix_irq);
@ -5706,7 +5751,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
.ndo_open = e1000_open,
.ndo_stop = e1000_close,
.ndo_start_xmit = e1000_xmit_frame,
.ndo_get_stats = e1000_get_stats,
.ndo_get_stats64 = e1000e_get_stats64,
.ndo_set_multicast_list = e1000_set_multi,
.ndo_set_mac_address = e1000_set_mac,
.ndo_change_mtu = e1000_change_mtu,

View file

@ -2409,9 +2409,7 @@ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
{
s32 ret_val;
u32 page_select = 0;
u32 page = offset >> IGP_PAGE_SHIFT;
u32 page_shift = 0;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
@ -2427,6 +2425,8 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
if (offset > MAX_PHY_MULTI_PAGE_REG) {
u32 page_shift, page_select;
/*
* Page select is register 31 for phy address 1 and 22 for
* phy address 2 and 3. Page select is shifted only for
@ -2468,9 +2468,7 @@ out:
s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
{
s32 ret_val;
u32 page_select = 0;
u32 page = offset >> IGP_PAGE_SHIFT;
u32 page_shift = 0;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
@ -2486,6 +2484,8 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
if (offset > MAX_PHY_MULTI_PAGE_REG) {
u32 page_shift, page_select;
/*
* Page select is register 31 for phy address 1 and 22 for
* phy address 2 and 3. Page select is shifted only for

View file

@ -1,5 +1,5 @@
obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o
enic_res.o enic_dev.o vnic_dev.o vnic_rq.o vnic_vic.o

View file

@ -32,13 +32,13 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
#define DRV_VERSION "1.4.1.10"
#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
#define DRV_VERSION "2.1.1.12"
#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
#define ENIC_WQ_MAX 8
#define ENIC_RQ_MAX 8
#define ENIC_WQ_MAX 1
#define ENIC_RQ_MAX 1
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
@ -49,7 +49,7 @@ struct enic_msix_entry {
void *devid;
};
#define ENIC_SET_APPLIED (1 << 0)
#define ENIC_PORT_REQUEST_APPLIED (1 << 0)
#define ENIC_SET_REQUEST (1 << 1)
#define ENIC_SET_NAME (1 << 2)
#define ENIC_SET_INSTANCE (1 << 3)
@ -101,7 +101,6 @@ struct enic {
/* receive queue cache line section */
____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
unsigned int rq_count;
int (*rq_alloc_buf)(struct vnic_rq *rq);
u64 rq_truncated_pkts;
u64 rq_bad_fcs;
struct napi_struct napi[ENIC_RQ_MAX];

drivers/net/enic/enic_dev.c (new file, 221 lines)
View file

@ -0,0 +1,221 @@
/*
* Copyright 2011 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include "vnic_dev.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_fw_info(enic->vdev, fw_info);
spin_unlock(&enic->devcmd_lock);
return err;
}
int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_stats_dump(enic->vdev, vstats);
spin_unlock(&enic->devcmd_lock);
return err;
}
int enic_dev_add_station_addr(struct enic *enic)
{
int err;
if (!is_valid_ether_addr(enic->netdev->dev_addr))
return -EADDRNOTAVAIL;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
spin_unlock(&enic->devcmd_lock);
return err;
}
int enic_dev_del_station_addr(struct enic *enic)
{
int err;
if (!is_valid_ether_addr(enic->netdev->dev_addr))
return -EADDRNOTAVAIL;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
spin_unlock(&enic->devcmd_lock);
return err;
}
int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
int broadcast, int promisc, int allmulti)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_packet_filter(enic->vdev, directed,
multicast, broadcast, promisc, allmulti);
spin_unlock(&enic->devcmd_lock);
return err;
}
int enic_dev_add_addr(struct enic *enic, u8 *addr)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_add_addr(enic->vdev, addr);
spin_unlock(&enic->devcmd_lock);
return err;
}
int enic_dev_del_addr(struct enic *enic, u8 *addr)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_del_addr(enic->vdev, addr);
spin_unlock(&enic->devcmd_lock);
return err;
}
int enic_dev_notify_unset(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_notify_unset(enic->vdev);
spin_unlock(&enic->devcmd_lock);
return err;
}
int enic_dev_hang_notify(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_hang_notify(enic->vdev);
spin_unlock(&enic->devcmd_lock);
return err;
}
int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
spin_unlock(&enic->devcmd_lock);
return err;
}
int enic_dev_enable(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_enable_wait(enic->vdev);
spin_unlock(&enic->devcmd_lock);
return err;
}
int enic_dev_disable(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_disable(enic->vdev);
spin_unlock(&enic->devcmd_lock);
return err;
}
int enic_vnic_dev_deinit(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_deinit(enic->vdev);
spin_unlock(&enic->devcmd_lock);
return err;
}
int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_init_prov(enic->vdev,
(u8 *)vp, vic_provinfo_size(vp));
spin_unlock(&enic->devcmd_lock);
return err;
}
int enic_dev_init_done(struct enic *enic, int *done, int *error)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_init_done(enic->vdev, done, error);
spin_unlock(&enic->devcmd_lock);
return err;
}
/* rtnl lock is held */
void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct enic *enic = netdev_priv(netdev);
spin_lock(&enic->devcmd_lock);
enic_add_vlan(enic, vid);
spin_unlock(&enic->devcmd_lock);
}
/* rtnl lock is held */
void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct enic *enic = netdev_priv(netdev);
spin_lock(&enic->devcmd_lock);
enic_del_vlan(enic, vid);
spin_unlock(&enic->devcmd_lock);
}
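Every wrapper in this new file follows one pattern: take enic->devcmd_lock around a single vnic_dev_*() firmware call so devcmds from different paths serialize. A sketch of what one more wrapper would look like (enic_dev_link_status() is hypothetical and shown only to illustrate the pattern; vnic_dev_link_status() is an existing vnic_dev call):

int enic_dev_link_status(struct enic *enic, int *link_up)
{
        spin_lock(&enic->devcmd_lock);
        *link_up = vnic_dev_link_status(enic->vdev);
        spin_unlock(&enic->devcmd_lock);

        return 0;
}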

View file

@ -0,0 +1,41 @@
/*
* Copyright 2011 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _ENIC_DEV_H_
#define _ENIC_DEV_H_
int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info);
int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats);
int enic_dev_add_station_addr(struct enic *enic);
int enic_dev_del_station_addr(struct enic *enic);
int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
int broadcast, int promisc, int allmulti);
int enic_dev_add_addr(struct enic *enic, u8 *addr);
int enic_dev_del_addr(struct enic *enic, u8 *addr);
void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
int enic_dev_notify_unset(struct enic *enic);
int enic_dev_hang_notify(struct enic *enic);
int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
int enic_dev_enable(struct enic *enic);
int enic_dev_disable(struct enic *enic);
int enic_vnic_dev_deinit(struct enic *enic);
int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp);
int enic_dev_init_done(struct enic *enic, int *done, int *error);
#endif /* _ENIC_DEV_H_ */

View file

@ -44,6 +44,7 @@
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
@ -190,18 +191,6 @@ static int enic_get_settings(struct net_device *netdev,
return 0;
}
static int enic_dev_fw_info(struct enic *enic,
struct vnic_devcmd_fw_info **fw_info)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_fw_info(enic->vdev, fw_info);
spin_unlock(&enic->devcmd_lock);
return err;
}
static void enic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
@ -246,17 +235,6 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
}
}
static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_stats_dump(enic->vdev, vstats);
spin_unlock(&enic->devcmd_lock);
return err;
}
static void enic_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
@ -896,9 +874,10 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
return net_stats;
}
static void enic_reset_multicast_list(struct enic *enic)
static void enic_reset_addr_lists(struct enic *enic)
{
enic->mc_count = 0;
enic->uc_count = 0;
enic->flags = 0;
}
@ -919,32 +898,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
return 0;
}
static int enic_dev_add_station_addr(struct enic *enic)
{
int err = 0;
if (is_valid_ether_addr(enic->netdev->dev_addr)) {
spin_lock(&enic->devcmd_lock);
err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
spin_unlock(&enic->devcmd_lock);
}
return err;
}
static int enic_dev_del_station_addr(struct enic *enic)
{
int err = 0;
if (is_valid_ether_addr(enic->netdev->dev_addr)) {
spin_lock(&enic->devcmd_lock);
err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
spin_unlock(&enic->devcmd_lock);
}
return err;
}
static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
struct enic *enic = netdev_priv(netdev);
@ -989,42 +942,7 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
return enic_dev_add_station_addr(enic);
}
static int enic_dev_packet_filter(struct enic *enic, int directed,
int multicast, int broadcast, int promisc, int allmulti)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_packet_filter(enic->vdev, directed,
multicast, broadcast, promisc, allmulti);
spin_unlock(&enic->devcmd_lock);
return err;
}
static int enic_dev_add_addr(struct enic *enic, u8 *addr)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_add_addr(enic->vdev, addr);
spin_unlock(&enic->devcmd_lock);
return err;
}
static int enic_dev_del_addr(struct enic *enic, u8 *addr)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_del_addr(enic->vdev, addr);
spin_unlock(&enic->devcmd_lock);
return err;
}
static void enic_add_multicast_addr_list(struct enic *enic)
static void enic_update_multicast_addr_list(struct enic *enic)
{
struct net_device *netdev = enic->netdev;
struct netdev_hw_addr *ha;
@ -1079,7 +997,7 @@ static void enic_add_multicast_addr_list(struct enic *enic)
enic->mc_count = mc_count;
}
static void enic_add_unicast_addr_list(struct enic *enic)
static void enic_update_unicast_addr_list(struct enic *enic)
{
struct net_device *netdev = enic->netdev;
struct netdev_hw_addr *ha;
@ -1156,9 +1074,9 @@ static void enic_set_rx_mode(struct net_device *netdev)
}
if (!promisc) {
enic_add_unicast_addr_list(enic);
enic_update_unicast_addr_list(enic);
if (!allmulti)
enic_add_multicast_addr_list(enic);
enic_update_multicast_addr_list(enic);
}
}
@ -1170,26 +1088,6 @@ static void enic_vlan_rx_register(struct net_device *netdev,
enic->vlan_group = vlan_group;
}
/* rtnl lock is held */
static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct enic *enic = netdev_priv(netdev);
spin_lock(&enic->devcmd_lock);
enic_add_vlan(enic, vid);
spin_unlock(&enic->devcmd_lock);
}
/* rtnl lock is held */
static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct enic *enic = netdev_priv(netdev);
spin_lock(&enic->devcmd_lock);
enic_del_vlan(enic, vid);
spin_unlock(&enic->devcmd_lock);
}
/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
@ -1197,40 +1095,6 @@ static void enic_tx_timeout(struct net_device *netdev)
schedule_work(&enic->reset);
}
static int enic_vnic_dev_deinit(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_deinit(enic->vdev);
spin_unlock(&enic->devcmd_lock);
return err;
}
static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_init_prov(enic->vdev,
(u8 *)vp, vic_provinfo_size(vp));
spin_unlock(&enic->devcmd_lock);
return err;
}
static int enic_dev_init_done(struct enic *enic, int *done, int *error)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_init_done(enic->vdev, done, error);
spin_unlock(&enic->devcmd_lock);
return err;
}
static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct enic *enic = netdev_priv(netdev);
@ -1262,6 +1126,8 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
if (err)
return err;
enic_reset_addr_lists(enic);
switch (enic->pp.request) {
case PORT_REQUEST_ASSOCIATE:
@ -1318,18 +1184,20 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
vic_provinfo_free(vp);
if (err)
return err;
enic->pp.set |= ENIC_SET_APPLIED;
break;
case PORT_REQUEST_DISASSOCIATE:
enic->pp.set &= ~ENIC_SET_APPLIED;
break;
default:
return -EINVAL;
}
/* Set flag to indicate that the port assoc/disassoc
* request has been sent out to fw
*/
enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
return 0;
}
@ -1379,9 +1247,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
if (is_zero_ether_addr(netdev->dev_addr))
random_ether_addr(netdev->dev_addr);
} else if (new_pp.request == PORT_REQUEST_DISASSOCIATE) {
if (!is_zero_ether_addr(enic->pp.mac_addr))
enic_dev_del_addr(enic, enic->pp.mac_addr);
}
memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
@ -1390,9 +1255,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
if (err)
goto set_port_profile_cleanup;
if (!is_zero_ether_addr(enic->pp.mac_addr))
enic_dev_add_addr(enic, enic->pp.mac_addr);
set_port_profile_cleanup:
memset(enic->pp.vf_mac, 0, ETH_ALEN);
@ -1411,7 +1273,7 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
int err, error, done;
u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
if (!(enic->pp.set & ENIC_SET_APPLIED))
if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
return -ENODATA;
err = enic_dev_init_done(enic, &done, &error);
@ -1489,62 +1351,6 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
return 0;
}
static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
{
struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
if (vnic_rq_posting_soon(rq)) {
/* SW workaround for A0 HW erratum: if we're just about
* to write posted_index, insert a dummy desc
* of type resvd
*/
rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
vnic_rq_post(rq, 0, 0, 0, 0);
} else {
return enic_rq_alloc_buf(rq);
}
return 0;
}
static int enic_dev_hw_version(struct enic *enic,
enum vnic_dev_hw_version *hw_ver)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_hw_version(enic->vdev, hw_ver);
spin_unlock(&enic->devcmd_lock);
return err;
}
static int enic_set_rq_alloc_buf(struct enic *enic)
{
enum vnic_dev_hw_version hw_ver;
int err;
err = enic_dev_hw_version(enic, &hw_ver);
if (err)
return err;
switch (hw_ver) {
case VNIC_DEV_HW_VER_A1:
enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
break;
case VNIC_DEV_HW_VER_A2:
case VNIC_DEV_HW_VER_UNKNOWN:
enic->rq_alloc_buf = enic_rq_alloc_buf;
break;
default:
return -ENODEV;
}
return 0;
}
static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
int skipped, void *opaque)
@ -1681,7 +1487,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling
* mode so we can try to fill the ring again.
@ -1731,7 +1537,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
err = vnic_rq_fill(&enic->rq[rq], enic->rq_alloc_buf);
err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling mode
* so we can try to fill the ring again.
@ -1901,39 +1707,6 @@ static int enic_dev_notify_set(struct enic *enic)
return err;
}
static int enic_dev_notify_unset(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_notify_unset(enic->vdev);
spin_unlock(&enic->devcmd_lock);
return err;
}
static int enic_dev_enable(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_enable_wait(enic->vdev);
spin_unlock(&enic->devcmd_lock);
return err;
}
static int enic_dev_disable(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_disable(enic->vdev);
spin_unlock(&enic->devcmd_lock);
return err;
}
static void enic_notify_timer_start(struct enic *enic)
{
switch (vnic_dev_get_intr_mode(enic->vdev)) {
@ -1967,7 +1740,7 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
/* Need at least one buffer on ring to get going */
if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
netdev_err(netdev, "Unable to alloc receive buffers\n");
@ -2285,29 +2058,6 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
rss_hash_bits, rss_base_cpu, rss_enable);
}
static int enic_dev_hang_notify(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_hang_notify(enic->vdev);
spin_unlock(&enic->devcmd_lock);
return err;
}
static int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
spin_unlock(&enic->devcmd_lock);
return err;
}
static void enic_reset(struct work_struct *work)
{
struct enic *enic = container_of(work, struct enic, reset);
@ -2320,7 +2070,7 @@ static void enic_reset(struct work_struct *work)
enic_dev_hang_notify(enic);
enic_stop(enic->netdev);
enic_dev_hang_reset(enic);
enic_reset_multicast_list(enic);
enic_reset_addr_lists(enic);
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
@ -2332,7 +2082,7 @@ static void enic_reset(struct work_struct *work)
static int enic_set_intr_mode(struct enic *enic)
{
unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
unsigned int m = 1;
unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
unsigned int i;
/* Set interrupt mode (INTx, MSI, MSI-X) depending
@ -2475,9 +2225,7 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
.ndo_tx_timeout = enic_tx_timeout,
.ndo_set_vf_port = enic_set_vf_port,
.ndo_get_vf_port = enic_get_vf_port,
#ifdef IFLA_VF_MAX
.ndo_set_vf_mac = enic_set_vf_mac,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = enic_poll_controller,
#endif
@ -2556,25 +2304,12 @@ static int enic_dev_init(struct enic *enic)
enic_init_vnic_resources(enic);
err = enic_set_rq_alloc_buf(enic);
if (err) {
dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
goto err_out_free_vnic_resources;
}
err = enic_set_rss_nic_cfg(enic);
if (err) {
dev_err(dev, "Failed to config nic, aborting\n");
goto err_out_free_vnic_resources;
}
err = enic_dev_set_ig_vlan_rewrite_mode(enic);
if (err) {
dev_err(dev,
"Failed to set ingress vlan rewrite mode, aborting.\n");
goto err_out_free_vnic_resources;
}
switch (vnic_dev_get_intr_mode(enic->vdev)) {
default:
netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
@ -2713,6 +2448,22 @@ static int __devinit enic_probe(struct pci_dev *pdev,
goto err_out_vnic_unregister;
}
/* Setup devcmd lock
*/
spin_lock_init(&enic->devcmd_lock);
/*
* Set ingress vlan rewrite mode before vnic initialization
*/
err = enic_dev_set_ig_vlan_rewrite_mode(enic);
if (err) {
dev_err(dev,
"Failed to set ingress vlan rewrite mode, aborting.\n");
goto err_out_dev_close;
}
/* Issue device init to initialize the vnic-to-switch link.
* We'll start with carrier off and wait for link UP
* notification later to turn on carrier. We don't need
@ -2736,11 +2487,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
}
}
/* Setup devcmd lock
*/
spin_lock_init(&enic->devcmd_lock);
err = enic_dev_init(enic);
if (err) {
dev_err(dev, "Device initialization failed, aborting\n");

View file

@ -408,10 +408,17 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
if (!vdev->fw_info)
return -ENOMEM;
memset(vdev->fw_info, 0, sizeof(struct vnic_devcmd_fw_info));
a0 = vdev->fw_info_pa;
a1 = sizeof(struct vnic_devcmd_fw_info);
/* only get fw_info once and cache it */
err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
if (err == ERR_ECMDUNKNOWN) {
err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
&a0, &a1, wait);
}
}
*fw_info = vdev->fw_info;
@ -419,25 +426,6 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
return err;
}
int vnic_dev_hw_version(struct vnic_dev *vdev, enum vnic_dev_hw_version *hw_ver)
{
struct vnic_devcmd_fw_info *fw_info;
int err;
err = vnic_dev_fw_info(vdev, &fw_info);
if (err)
return err;
if (strncmp(fw_info->hw_version, "A1", sizeof("A1")) == 0)
*hw_ver = VNIC_DEV_HW_VER_A1;
else if (strncmp(fw_info->hw_version, "A2", sizeof("A2")) == 0)
*hw_ver = VNIC_DEV_HW_VER_A2;
else
*hw_ver = VNIC_DEV_HW_VER_UNKNOWN;
return 0;
}
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value)
{

View file

@ -44,12 +44,6 @@ static inline void writeq(u64 val, void __iomem *reg)
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
enum vnic_dev_hw_version {
VNIC_DEV_HW_VER_UNKNOWN,
VNIC_DEV_HW_VER_A1,
VNIC_DEV_HW_VER_A2,
};
enum vnic_dev_intr_mode {
VNIC_DEV_INTR_MODE_UNKNOWN,
VNIC_DEV_INTR_MODE_INTX,
@ -93,8 +87,6 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
int vnic_dev_hw_version(struct vnic_dev *vdev,
enum vnic_dev_hw_version *hw_ver);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value);
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);

View file

@ -80,8 +80,34 @@
enum vnic_devcmd_cmd {
CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
/* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
/*
* mcpu fw info in mem:
* in:
* (u64)a0=paddr to struct vnic_devcmd_fw_info
* action:
* Fills in struct vnic_devcmd_fw_info (128 bytes)
* note:
* An old definition of CMD_MCPU_FW_INFO
*/
CMD_MCPU_FW_INFO_OLD = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
/*
* mcpu fw info in mem:
* in:
* (u64)a0=paddr to struct vnic_devcmd_fw_info
* (u16)a1=size of the structure
* out:
* (u16)a1=0 for in:a1 = 0,
* data size actually written for other values.
* action:
* Fills in first 128 bytes of vnic_devcmd_fw_info for in:a1 = 0,
* first in:a1 bytes for 0 < in:a1 <= 132,
* 132 bytes for other values of in:a1.
* note:
* CMD_MCPU_FW_INFO and CMD_MCPU_FW_INFO_OLD have the same enum 1
* for source compatibility.
*/
CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 1),
/* dev-specific block member:
* in: (u16)a0=offset,(u8)a1=size
@ -291,11 +317,19 @@ enum vnic_devcmd_error {
ERR_EMAXRES = 10,
};
/*
* note: hw_version and asic_rev refer to the same thing,
* but have different formats. hw_version is
* a 32-byte string (e.g. "A2") and asic_rev is
* a 16-bit integer (e.g. 0xA2).
*/
struct vnic_devcmd_fw_info {
char fw_version[32];
char fw_build[32];
char hw_version[32];
char hw_serial_number[32];
u16 asic_type;
u16 asic_rev;
};
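Per the command documentation above, new firmware negotiates the fw_info payload size through a1, while old firmware only understands the 128-byte form under the same enum value. A sketch of the caller-side fallback, which is the same logic the vnic_dev_fw_info() hunk earlier in this diff adds:

u64 a0 = vdev->fw_info_pa;                   /* DMA address of the buffer */
u64 a1 = sizeof(struct vnic_devcmd_fw_info); /* request the full struct */
int wait = 1000;
int err;

err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
if (err == ERR_ECMDUNKNOWN)                  /* pre-a1 firmware */
        err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD, &a0, &a1, wait);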
struct vnic_devcmd_notify {

View file

@ -141,11 +141,6 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
}
}
static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
{
return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
}
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
rq->ring.desc_avail += count;

View file

@ -111,6 +111,8 @@
* Sorry, I had to rewrite most of this for 2.5.x -DaveM
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
@ -162,7 +164,7 @@ static void eql_timer(unsigned long param)
}
static const char version[] __initconst =
"Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n";
"Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";
static const struct net_device_ops eql_netdev_ops = {
.ndo_open = eql_open,
@ -204,8 +206,8 @@ static int eql_open(struct net_device *dev)
equalizer_t *eql = netdev_priv(dev);
/* XXX We should force this off automatically for the user. */
printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on "
"your slave devices.\n", dev->name);
netdev_info(dev,
"remember to turn off Van-Jacobson compression on your slave devices\n");
BUG_ON(!list_empty(&eql->queue.all_slaves));
@ -591,7 +593,7 @@ static int __init eql_init_module(void)
{
int err;
printk(version);
pr_info("%s\n", version);
dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
if (!dev_eql)

File diff suppressed because it is too large Load diff

View file

@ -5744,7 +5744,7 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int nv_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
@ -5795,6 +5795,11 @@ static int nv_resume(struct device *device)
static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
#define NV_PM_OPS (&nv_pm_ops)
#else
#define NV_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static void nv_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@ -5822,7 +5827,6 @@ static void nv_shutdown(struct pci_dev *pdev)
}
}
#else
#define NV_PM_OPS NULL
#define nv_shutdown NULL
#endif /* CONFIG_PM */

drivers/net/ftmac100.c (new file, 1198 lines; diff suppressed because it is too large)

drivers/net/ftmac100.h (new file, 180 lines)
View file

@ -0,0 +1,180 @@
/*
* Faraday FTMAC100 10/100 Ethernet
*
* (C) Copyright 2009-2011 Faraday Technology
* Po-Yu Chuang <ratbert@faraday-tech.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __FTMAC100_H
#define __FTMAC100_H
#define FTMAC100_OFFSET_ISR 0x00
#define FTMAC100_OFFSET_IMR 0x04
#define FTMAC100_OFFSET_MAC_MADR 0x08
#define FTMAC100_OFFSET_MAC_LADR 0x0c
#define FTMAC100_OFFSET_MAHT0 0x10
#define FTMAC100_OFFSET_MAHT1 0x14
#define FTMAC100_OFFSET_TXPD 0x18
#define FTMAC100_OFFSET_RXPD 0x1c
#define FTMAC100_OFFSET_TXR_BADR 0x20
#define FTMAC100_OFFSET_RXR_BADR 0x24
#define FTMAC100_OFFSET_ITC 0x28
#define FTMAC100_OFFSET_APTC 0x2c
#define FTMAC100_OFFSET_DBLAC 0x30
#define FTMAC100_OFFSET_MACCR 0x88
#define FTMAC100_OFFSET_MACSR 0x8c
#define FTMAC100_OFFSET_PHYCR 0x90
#define FTMAC100_OFFSET_PHYWDATA 0x94
#define FTMAC100_OFFSET_FCR 0x98
#define FTMAC100_OFFSET_BPR 0x9c
#define FTMAC100_OFFSET_TS 0xc4
#define FTMAC100_OFFSET_DMAFIFOS 0xc8
#define FTMAC100_OFFSET_TM 0xcc
#define FTMAC100_OFFSET_TX_MCOL_SCOL 0xd4
#define FTMAC100_OFFSET_RPF_AEP 0xd8
#define FTMAC100_OFFSET_XM_PG 0xdc
#define FTMAC100_OFFSET_RUNT_TLCC 0xe0
#define FTMAC100_OFFSET_CRCER_FTL 0xe4
#define FTMAC100_OFFSET_RLC_RCC 0xe8
#define FTMAC100_OFFSET_BROC 0xec
#define FTMAC100_OFFSET_MULCA 0xf0
#define FTMAC100_OFFSET_RP 0xf4
#define FTMAC100_OFFSET_XP 0xf8
/*
* Interrupt status register & interrupt mask register
*/
#define FTMAC100_INT_RPKT_FINISH (1 << 0)
#define FTMAC100_INT_NORXBUF (1 << 1)
#define FTMAC100_INT_XPKT_FINISH (1 << 2)
#define FTMAC100_INT_NOTXBUF (1 << 3)
#define FTMAC100_INT_XPKT_OK (1 << 4)
#define FTMAC100_INT_XPKT_LOST (1 << 5)
#define FTMAC100_INT_RPKT_SAV (1 << 6)
#define FTMAC100_INT_RPKT_LOST (1 << 7)
#define FTMAC100_INT_AHB_ERR (1 << 8)
#define FTMAC100_INT_PHYSTS_CHG (1 << 9)
/*
* Interrupt timer control register
*/
#define FTMAC100_ITC_RXINT_CNT(x) (((x) & 0xf) << 0)
#define FTMAC100_ITC_RXINT_THR(x) (((x) & 0x7) << 4)
#define FTMAC100_ITC_RXINT_TIME_SEL (1 << 7)
#define FTMAC100_ITC_TXINT_CNT(x) (((x) & 0xf) << 8)
#define FTMAC100_ITC_TXINT_THR(x) (((x) & 0x7) << 12)
#define FTMAC100_ITC_TXINT_TIME_SEL (1 << 15)
/*
* Automatic polling timer control register
*/
#define FTMAC100_APTC_RXPOLL_CNT(x) (((x) & 0xf) << 0)
#define FTMAC100_APTC_RXPOLL_TIME_SEL (1 << 4)
#define FTMAC100_APTC_TXPOLL_CNT(x) (((x) & 0xf) << 8)
#define FTMAC100_APTC_TXPOLL_TIME_SEL (1 << 12)
/*
* DMA burst length and arbitration control register
*/
#define FTMAC100_DBLAC_INCR4_EN (1 << 0)
#define FTMAC100_DBLAC_INCR8_EN (1 << 1)
#define FTMAC100_DBLAC_INCR16_EN (1 << 2)
#define FTMAC100_DBLAC_RXFIFO_LTHR(x) (((x) & 0x7) << 3)
#define FTMAC100_DBLAC_RXFIFO_HTHR(x) (((x) & 0x7) << 6)
#define FTMAC100_DBLAC_RX_THR_EN (1 << 9)
/*
* MAC control register
*/
#define FTMAC100_MACCR_XDMA_EN (1 << 0)
#define FTMAC100_MACCR_RDMA_EN (1 << 1)
#define FTMAC100_MACCR_SW_RST (1 << 2)
#define FTMAC100_MACCR_LOOP_EN (1 << 3)
#define FTMAC100_MACCR_CRC_DIS (1 << 4)
#define FTMAC100_MACCR_XMT_EN (1 << 5)
#define FTMAC100_MACCR_ENRX_IN_HALFTX (1 << 6)
#define FTMAC100_MACCR_RCV_EN (1 << 8)
#define FTMAC100_MACCR_HT_MULTI_EN (1 << 9)
#define FTMAC100_MACCR_RX_RUNT (1 << 10)
#define FTMAC100_MACCR_RX_FTL (1 << 11)
#define FTMAC100_MACCR_RCV_ALL (1 << 12)
#define FTMAC100_MACCR_CRC_APD (1 << 14)
#define FTMAC100_MACCR_FULLDUP (1 << 15)
#define FTMAC100_MACCR_RX_MULTIPKT (1 << 16)
#define FTMAC100_MACCR_RX_BROADPKT (1 << 17)
/*
* PHY control register
*/
#define FTMAC100_PHYCR_MIIRDATA 0xffff
#define FTMAC100_PHYCR_PHYAD(x) (((x) & 0x1f) << 16)
#define FTMAC100_PHYCR_REGAD(x) (((x) & 0x1f) << 21)
#define FTMAC100_PHYCR_MIIRD (1 << 26)
#define FTMAC100_PHYCR_MIIWR (1 << 27)
/*
* PHY write data register
*/
#define FTMAC100_PHYWDATA_MIIWDATA(x) ((x) & 0xffff)
/*
* Transmit descriptor, aligned to 16 bytes
*/
struct ftmac100_txdes {
unsigned int txdes0;
unsigned int txdes1;
unsigned int txdes2; /* TXBUF_BADR */
unsigned int txdes3; /* not used by HW */
} __attribute__ ((aligned(16)));
#define FTMAC100_TXDES0_TXPKT_LATECOL (1 << 0)
#define FTMAC100_TXDES0_TXPKT_EXSCOL (1 << 1)
#define FTMAC100_TXDES0_TXDMA_OWN (1 << 31)
#define FTMAC100_TXDES1_TXBUF_SIZE(x) ((x) & 0x7ff)
#define FTMAC100_TXDES1_LTS (1 << 27)
#define FTMAC100_TXDES1_FTS (1 << 28)
#define FTMAC100_TXDES1_TX2FIC (1 << 29)
#define FTMAC100_TXDES1_TXIC (1 << 30)
#define FTMAC100_TXDES1_EDOTR (1 << 31)
/*
* Receive descriptor, aligned to 16 bytes
*/
struct ftmac100_rxdes {
unsigned int rxdes0;
unsigned int rxdes1;
unsigned int rxdes2; /* RXBUF_BADR */
unsigned int rxdes3; /* not used by HW */
} __attribute__ ((aligned(16)));
#define FTMAC100_RXDES0_RFL 0x7ff
#define FTMAC100_RXDES0_MULTICAST (1 << 16)
#define FTMAC100_RXDES0_BROADCAST (1 << 17)
#define FTMAC100_RXDES0_RX_ERR (1 << 18)
#define FTMAC100_RXDES0_CRC_ERR (1 << 19)
#define FTMAC100_RXDES0_FTL (1 << 20)
#define FTMAC100_RXDES0_RUNT (1 << 21)
#define FTMAC100_RXDES0_RX_ODD_NB (1 << 22)
#define FTMAC100_RXDES0_LRS (1 << 28)
#define FTMAC100_RXDES0_FRS (1 << 29)
#define FTMAC100_RXDES0_RXDMA_OWN (1 << 31)
#define FTMAC100_RXDES1_RXBUF_SIZE(x) ((x) & 0x7ff)
#define FTMAC100_RXDES1_EDORR (1 << 31)
#endif /* __FTMAC100_H */
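Most of these fields are built with shift/mask helpers, and FTMAC100_PHYCR packs the PHY address, the register number, the read/write strobes, and the returned data into a single word. As an illustration only (not code from this driver), an MDIO read could be composed from the helpers above; "base", the retry bound, and the delay are assumptions:

/* Illustrative sketch, not driver code: an MDIO read composed from the
 * FTMAC100_PHYCR_* helpers. Assumes "base" is the ioremapped register
 * window; the retry count and delay are arbitrary. */
static int example_mdio_read(void __iomem *base, int phy_id, int reg)
{
	unsigned int phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
			     FTMAC100_PHYCR_REGAD(reg) |
			     FTMAC100_PHYCR_MIIRD;
	int i;

	iowrite32(phycr, base + FTMAC100_OFFSET_PHYCR);

	for (i = 0; i < 10; i++) {
		phycr = ioread32(base + FTMAC100_OFFSET_PHYCR);
		if (!(phycr & FTMAC100_PHYCR_MIIRD)) /* HW clears MIIRD when done */
			return phycr & FTMAC100_PHYCR_MIIRDATA;
		udelay(100);
	}
	return -EIO; /* read timed out */
}

The real driver's polling and locking may differ; the point is how the PHYCR field helpers compose into one register write.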

drivers/net/hamradio/bpqether.c:

@@ -400,13 +400,14 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *p;
struct bpqdev *bpqdev = v;
++*pos;
if (v == SEQ_START_TOKEN)
p = rcu_dereference(bpq_devices.next);
p = rcu_dereference(list_next_rcu(&bpq_devices));
else
p = rcu_dereference(((struct bpqdev *)v)->bpq_list.next);
p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));
return (p == &bpq_devices) ? NULL
: list_entry(p, struct bpqdev, bpq_list);
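The bpqether hunk above swaps open-coded ->next loads for list_next_rcu(), whose __rcu-annotated lvalue lets rcu_dereference() (and sparse) check the access, and it caches v in a typed bpqdev local instead of repeating the cast. A generic sketch of the traversal idiom, with a hypothetical example_list head:

/* Sketch of the idiom (hypothetical "example_list"): list_next_rcu()
 * yields the __rcu-annotated next pointer, so rcu_dereference() is
 * applied to a correctly annotated lvalue on every hop. */
struct list_head *p;

rcu_read_lock();
for (p = rcu_dereference(list_next_rcu(&example_list));
     p != &example_list;
     p = rcu_dereference(list_next_rcu(p))) {
	struct bpqdev *bpq = list_entry(p, struct bpqdev, bpq_list);
	/* ... read-side use of bpq ... */
}
rcu_read_unlock();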

drivers/net/igb/e1000_82575.c:

@@ -64,7 +64,14 @@ static s32 igb_reset_init_script_82575(struct e1000_hw *);
static s32 igb_read_mac_addr_82575(struct e1000_hw *);
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw,
u16 offset);
static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
u16 offset);
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
static const u16 e1000_82580_rxpbs_table[] =
{ 36, 72, 144, 1, 2, 4, 8, 16,
35, 70, 140 };
@@ -129,6 +136,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
case E1000_DEV_ID_82580_COPPER:
case E1000_DEV_ID_82580_FIBER:
case E1000_DEV_ID_82580_QUAD_FIBER:
case E1000_DEV_ID_82580_SERDES:
case E1000_DEV_ID_82580_SGMII:
case E1000_DEV_ID_82580_COPPER_DUAL:
@@ -194,7 +202,11 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
mac->arc_subsystem_valid =
(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
/* enable EEE on i350 parts */
if (mac->type == e1000_i350)
dev_spec->eee_disable = false;
else
dev_spec->eee_disable = true;
/* physical interface link setup */
mac->ops.setup_physical_interface =
(hw->phy.media_type == e1000_media_type_copper)
@@ -232,14 +244,42 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
nvm->word_size = 1 << size;
if (nvm->word_size == (1 << 15))
nvm->page_size = 128;
/* if 82576 then initialize mailbox parameters */
if (mac->type == e1000_82576)
/* NVM Function Pointers */
nvm->ops.acquire = igb_acquire_nvm_82575;
if (nvm->word_size < (1 << 15))
nvm->ops.read = igb_read_nvm_eerd;
else
nvm->ops.read = igb_read_nvm_spi;
nvm->ops.release = igb_release_nvm_82575;
switch (hw->mac.type) {
case e1000_82580:
nvm->ops.validate = igb_validate_nvm_checksum_82580;
nvm->ops.update = igb_update_nvm_checksum_82580;
break;
case e1000_i350:
nvm->ops.validate = igb_validate_nvm_checksum_i350;
nvm->ops.update = igb_update_nvm_checksum_i350;
break;
default:
nvm->ops.validate = igb_validate_nvm_checksum;
nvm->ops.update = igb_update_nvm_checksum;
}
nvm->ops.write = igb_write_nvm_spi;
/* if part supports SR-IOV then initialize mailbox parameters */
switch (mac->type) {
case e1000_82576:
case e1000_i350:
igb_init_mbx_params_pf(hw);
break;
default:
break;
}
/* setup PHY parameters */
if (phy->media_type != e1000_media_type_copper) {
@@ -1747,6 +1787,248 @@ u16 igb_rxpbs_adjust_82580(u32 data)
return ret_val;
}
/**
* igb_validate_nvm_checksum_with_offset - Validate EEPROM
* checksum
* @hw: pointer to the HW structure
* @offset: offset in words of the checksum protected region
*
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
**/
s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
{
s32 ret_val = 0;
u16 checksum = 0;
u16 i, nvm_data;
for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
checksum += nvm_data;
}
if (checksum != (u16) NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -E1000_ERR_NVM;
goto out;
}
out:
return ret_val;
}
/**
* igb_update_nvm_checksum_with_offset - Update EEPROM
* checksum
* @hw: pointer to the HW structure
* @offset: offset in words of the checksum protected region
*
* Updates the EEPROM checksum by reading/adding each word of the EEPROM
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM.
**/
s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
{
s32 ret_val;
u16 checksum = 0;
u16 i, nvm_data;
for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error while updating checksum.\n");
goto out;
}
checksum += nvm_data;
}
checksum = (u16) NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
&checksum);
if (ret_val)
hw_dbg("NVM Write Error while updating checksum.\n");
out:
return ret_val;
}
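The two helpers above maintain a single invariant, spelled out in their kernel-doc: the 16-bit sum of every word in the protected region, including the checksum word itself, must equal NVM_SUM (0xBABA). A standalone demonstration of that arithmetic (plain userspace C, not driver code):

/* Standalone demo of the invariant: the stored checksum word is chosen
 * so the 16-bit sum of the whole region comes out to 0xBABA. */
#include <stdint.h>
#include <stdio.h>

#define NVM_SUM 0xBABA

int main(void)
{
	uint16_t words[4] = { 0x1234, 0xABCD, 0x0042, 0 /* checksum slot */ };
	uint16_t sum = 0;
	int i;

	for (i = 0; i < 3; i++)
		sum += words[i];
	words[3] = (uint16_t)(NVM_SUM - sum);	/* what ops.update writes */

	sum = 0;
	for (i = 0; i < 4; i++)
		sum += words[i];		/* what ops.validate re-checks */
	printf("total = 0x%04X (expect 0xBABA)\n", sum);
	return 0;
}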
/**
* igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM section checksum by reading/adding each word of
* the EEPROM and then verifies that the sum of the EEPROM is
* equal to 0xBABA.
**/
static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
{
s32 ret_val = 0;
u16 eeprom_regions_count = 1;
u16 j, nvm_data;
u16 nvm_offset;
ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
/* if checksum compatibility bit is set, validate checksums
* for all 4 ports. */
eeprom_regions_count = 4;
}
for (j = 0; j < eeprom_regions_count; j++) {
nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
ret_val = igb_validate_nvm_checksum_with_offset(hw,
nvm_offset);
if (ret_val != 0)
goto out;
}
out:
return ret_val;
}
/**
* igb_update_nvm_checksum_82580 - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM section checksums for all 4 ports by reading/adding
* each word of the EEPROM up to the checksum. Then calculates the EEPROM
* checksum and writes the value to the EEPROM.
**/
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
{
s32 ret_val;
u16 j, nvm_data;
u16 nvm_offset;
ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error while updating checksum"
" compatibility bit.\n");
goto out;
}
if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
/* set compatibility bit to validate checksums appropriately */
nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
&nvm_data);
if (ret_val) {
hw_dbg("NVM Write Error while updating checksum"
" compatibility bit.\n");
goto out;
}
}
for (j = 0; j < 4; j++) {
nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
if (ret_val)
goto out;
}
out:
return ret_val;
}
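Both 82580 helpers, and the i350 variants below, locate each port's checksum region with NVM_82580_LAN_FUNC_OFFSET(). The macro itself is outside this diff; assuming the definition igb used at the time, ports 0..3 map to word offsets 0x000, 0x080, 0x0C0 and 0x100, as this small demo shows:

#include <stdio.h>

/* Assumed definition -- the real macro is outside this diff. */
#define NVM_82580_LAN_FUNC_OFFSET(a)	((a) ? (0x40 + (0x40 * (a))) : 0)

int main(void)
{
	int j;

	for (j = 0; j < 4; j++)	/* same bounds as the loops above */
		printf("LAN function %d -> word offset 0x%03x\n",
		       j, NVM_82580_LAN_FUNC_OFFSET(j));
	return 0;
}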
/**
* igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM section checksum by reading/adding each word of
* the EEPROM and then verifies that the sum of the EEPROM is
* equal to 0xBABA.
**/
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
{
s32 ret_val = 0;
u16 j;
u16 nvm_offset;
for (j = 0; j < 4; j++) {
nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
ret_val = igb_validate_nvm_checksum_with_offset(hw,
nvm_offset);
if (ret_val != 0)
goto out;
}
out:
return ret_val;
}
/**
* igb_update_nvm_checksum_i350 - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM section checksums for all 4 ports by reading/adding
* each word of the EEPROM up to the checksum. Then calculates the EEPROM
* checksum and writes the value to the EEPROM.
**/
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
{
s32 ret_val = 0;
u16 j;
u16 nvm_offset;
for (j = 0; j < 4; j++) {
nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
if (ret_val != 0)
goto out;
}
out:
return ret_val;
}
/**
* igb_set_eee_i350 - Enable/disable EEE support
* @hw: pointer to the HW structure
*
* Enable/disable EEE based on setting in dev_spec structure.
*
**/
s32 igb_set_eee_i350(struct e1000_hw *hw)
{
s32 ret_val = 0;
u32 ipcnfg, eeer, ctrl_ext;
ctrl_ext = rd32(E1000_CTRL_EXT);
if ((hw->mac.type != e1000_i350) ||
(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
goto out;
ipcnfg = rd32(E1000_IPCNFG);
eeer = rd32(E1000_EEER);
/* enable or disable per user setting */
if (!(hw->dev_spec._82575.eee_disable)) {
ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
E1000_IPCNFG_EEE_100M_AN);
eeer |= (E1000_EEER_TX_LPI_EN |
E1000_EEER_RX_LPI_EN |
E1000_EEER_LPI_FC);
} else {
ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
E1000_IPCNFG_EEE_100M_AN);
eeer &= ~(E1000_EEER_TX_LPI_EN |
E1000_EEER_RX_LPI_EN |
E1000_EEER_LPI_FC);
}
wr32(E1000_IPCNFG, ipcnfg);
wr32(E1000_EEER, eeer);
out:
return ret_val;
}
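igb_set_eee_i350() is a no-op unless the part is an i350 with the CTRL_EXT link-mode bits clear (internal PHY); callers set dev_spec._82575.eee_disable and then invoke it to reprogram E1000_IPCNFG/E1000_EEER. A hedged usage sketch (the wrapper and its error code are illustrative, not from the driver):

/* Illustrative caller, not driver code: flip the per-device flag and
 * let igb_set_eee_i350() program E1000_IPCNFG/E1000_EEER to match. */
static s32 example_set_eee(struct e1000_hw *hw, bool enable)
{
	if (hw->mac.type != e1000_i350)
		return -E1000_ERR_CONFIG;	/* assumed error code */

	hw->dev_spec._82575.eee_disable = !enable;
	return igb_set_eee_i350(hw);
}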
static struct e1000_mac_operations e1000_mac_ops_82575 = {
.init_hw = igb_init_hw_82575,
.check_for_link = igb_check_for_link_82575,

drivers/net/igb/e1000_82575.h:

@@ -251,5 +251,6 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
u16 igb_rxpbs_adjust_82580(u32 data);
s32 igb_set_eee_i350(struct e1000_hw *);
#endif

Some files were not shown because too many files have changed in this diff.