Diffstat (limited to 'include')
-rw-r--r-- | include/dt-bindings/clock/sifive-fu540-prci.h | 18
-rw-r--r-- | include/linux/kvm_host.h                      | 10
-rw-r--r-- | include/linux/netdevice.h                     |  3
-rw-r--r-- | include/net/af_rxrpc.h                        |  4
-rw-r--r-- | include/net/cfg80211.h                        |  5
-rw-r--r-- | include/net/mac80211.h                        | 63
-rw-r--r-- | include/net/netrom.h                          |  2
-rw-r--r-- | include/net/sock.h                            |  6
-rw-r--r-- | include/net/tls.h                             |  4
9 files changed, 73 insertions, 42 deletions
diff --git a/include/dt-bindings/clock/sifive-fu540-prci.h b/include/dt-bindings/clock/sifive-fu540-prci.h
new file mode 100644
index 000000000000..6a0b70a37d78
--- /dev/null
+++ b/include/dt-bindings/clock/sifive-fu540-prci.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H
+#define __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H
+
+/* Clock indexes for use by Device Tree data and the PRCI driver */
+
+#define PRCI_CLK_COREPLL	0
+#define PRCI_CLK_DDRPLL		1
+#define PRCI_CLK_GEMGXLPLL	2
+#define PRCI_CLK_TLCLK		3
+
+#endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9d55c63db09b..640a03642766 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -28,6 +28,7 @@
 #include <linux/irqbypass.h>
 #include <linux/swait.h>
 #include <linux/refcount.h>
+#include <linux/nospec.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -513,10 +514,10 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
 
 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 {
-	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
-	 * the caller has read kvm->online_vcpus before (as is the case
-	 * for kvm_for_each_vcpu, for example).
-	 */
+	int num_vcpus = atomic_read(&kvm->online_vcpus);
+	i = array_index_nospec(i, num_vcpus);
+
+	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
 	smp_rmb();
 	return kvm->vcpus[i];
 }
@@ -600,6 +601,7 @@ void kvm_put_kvm(struct kvm *kvm);
 
 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
 {
+	as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
 	return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
 			lockdep_is_held(&kvm->slots_lock) ||
 			!refcount_read(&kvm->users_count));
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 26f69cf763f4..324e872c91d1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1500,6 +1500,7 @@ struct net_device_ops {
  * @IFF_FAILOVER: device is a failover master device
  * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
  * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
+ * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
  */
 enum netdev_priv_flags {
 	IFF_802_1Q_VLAN			= 1<<0,
@@ -1532,6 +1533,7 @@ enum netdev_priv_flags {
 	IFF_FAILOVER			= 1<<27,
 	IFF_FAILOVER_SLAVE		= 1<<28,
 	IFF_L3MDEV_RX_HANDLER		= 1<<29,
+	IFF_LIVE_RENAME_OK		= 1<<30,
 };
 
 #define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
@@ -1563,6 +1565,7 @@ enum netdev_priv_flags {
 #define IFF_FAILOVER			IFF_FAILOVER
 #define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
 #define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
+#define IFF_LIVE_RENAME_OK		IFF_LIVE_RENAME_OK
 
 /**
  *	struct net_device - The DEVICE structure.
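Note: the kvm_host.h hunks above clamp user-influenced indexes (the vcpu index and the address-space id) with array_index_nospec() before the array access, so a mispredicted bounds check cannot be used to read out of bounds under speculation (Spectre v1). The snippet below is an illustrative sketch of that pattern, not part of this diff; lookup(), table and nr_entries are made-up names.

#include <linux/errno.h>
#include <linux/nospec.h>

/* Illustrative only: bounds-check first, then clamp the index under speculation. */
static int lookup(const int *table, unsigned int nr_entries, unsigned int idx)
{
	if (idx >= nr_entries)
		return -EINVAL;

	/* Even if the branch above is mispredicted, idx stays within 0..nr_entries-1. */
	idx = array_index_nospec(idx, nr_entries);
	return table[idx];
}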
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 2bfb87eb98ce..78c856cba4f5 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -61,10 +61,12 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
 			       rxrpc_user_attach_call_t, unsigned long, gfp_t,
 			       unsigned int);
 void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
+bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *,
+			     u32 *);
 void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
 u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
 bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
 				 ktime_t *);
+bool rxrpc_kernel_call_is_complete(struct rxrpc_call *);
 
 #endif /* _NET_RXRPC_H */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index bb307a11ee63..13bfeb712d36 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -7183,6 +7183,11 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
 #define wiphy_info(wiphy, format, args...)			\
 	dev_info(&(wiphy)->dev, format, ##args)
 
+#define wiphy_err_ratelimited(wiphy, format, args...)		\
+	dev_err_ratelimited(&(wiphy)->dev, format, ##args)
+#define wiphy_warn_ratelimited(wiphy, format, args...)		\
+	dev_warn_ratelimited(&(wiphy)->dev, format, ##args)
+
 #define wiphy_debug(wiphy, format, args...)			\
 	wiphy_printk(KERN_DEBUG, wiphy, format, ##args)
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index ac2ed8ec662b..112dc18c658f 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -6231,8 +6231,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @ac: AC number to return packets from.
  *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
  * Returns the next txq if successful, %NULL if no queue is eligible. If a txq
  * is returned, it should be returned with ieee80211_return_txq() after the
  * driver has finished scheduling it.
@@ -6240,51 +6238,58 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac);
 
 /**
- * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
- *
- * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @txq: pointer obtained from station or virtual interface
- *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
- */
-void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
-
-/**
- * ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC
+ * ieee80211_txq_schedule_start - start new scheduling round for TXQs
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @ac: AC number to acquire locks for
  *
- * Acquire locks needed to schedule TXQs from the given AC. Should be called
- * before ieee80211_next_txq() or ieee80211_return_txq().
+ * Should be called before ieee80211_next_txq() or ieee80211_return_txq().
+ * The driver must not call multiple TXQ scheduling rounds concurrently.
  */
-void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-	__acquires(txq_lock);
+void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac);
+
+/* (deprecated) */
+static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+{
+}
+
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+			      struct ieee80211_txq *txq, bool force);
 
 /**
- * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC
+ * ieee80211_schedule_txq - schedule a TXQ for transmission
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @ac: AC number to acquire locks for
+ * @txq: pointer obtained from station or virtual interface
  *
- * Release locks previously acquired by ieee80211_txq_schedule_end().
+ * Schedules a TXQ for transmission if it is not already scheduled,
+ * even if mac80211 does not have any packets buffered.
+ *
+ * The driver may call this function if it has buffered packets for
+ * this TXQ internally.
  */
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-	__releases(txq_lock);
+static inline void
+ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+	__ieee80211_schedule_txq(hw, txq, true);
+}
 
 /**
- * ieee80211_schedule_txq - schedule a TXQ for transmission
+ * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @txq: pointer obtained from station or virtual interface
+ * @force: schedule txq even if mac80211 does not have any buffered packets.
  *
- * Schedules a TXQ for transmission if it is not already scheduled. Takes a
- * lock, which means it must *not* be called between
- * ieee80211_txq_schedule_start() and ieee80211_txq_schedule_end()
+ * The driver may set force=true if it has buffered packets for this TXQ
+ * internally.
  */
-void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
-	__acquires(txq_lock) __releases(txq_lock);
+static inline void
+ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
+		     bool force)
+{
+	__ieee80211_schedule_txq(hw, txq, force);
+}
 
 /**
  * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
diff --git a/include/net/netrom.h b/include/net/netrom.h
index 5a0714ff500f..80f15b1c1a48 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *);
 int nr_t1timer_running(struct sock *);
 
 /* sysctl_net_netrom.c */
-void nr_register_sysctl(void);
+int nr_register_sysctl(void);
 void nr_unregister_sysctl(void);
 
 #endif
diff --git a/include/net/sock.h b/include/net/sock.h
index 8de5ee258b93..341f8bafa0cf 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2084,12 +2084,6 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
  * @p: poll_table
  *
  * See the comments in the wq_has_sleeper function.
- *
- * Do not derive sock from filp->private_data here. An SMC socket establishes
- * an internal TCP socket that is used in the fallback case. All socket
- * operations on the SMC socket are then forwarded to the TCP socket. In case of
- * poll, the filp->private_data pointer references the SMC socket because the
- * TCP socket has no file assigned.
  */
 static inline void sock_poll_wait(struct file *filp, struct socket *sock,
 				  poll_table *p)
diff --git a/include/net/tls.h b/include/net/tls.h
index a5a938583295..5934246b2c6f 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -307,6 +307,7 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_device_sendpage(struct sock *sk, struct page *page,
 			int offset, size_t size, int flags);
 void tls_device_sk_destruct(struct sock *sk);
+void tls_device_free_resources_tx(struct sock *sk);
 void tls_device_init(void);
 void tls_device_cleanup(void);
 int tls_tx_records(struct sock *sk, int flags);
@@ -330,6 +331,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx,
 		int flags);
 int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
 			    int flags);
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
 
 static inline struct tls_msg *tls_msg(struct sk_buff *skb)
 {
@@ -379,7 +381,7 @@ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
 static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 {
 #ifdef CONFIG_SOCK_VALIDATE_XMIT
-	return sk_fullsock(sk) &
+	return sk_fullsock(sk) &&
 	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
 	       &tls_validate_xmit_skb);
 #else
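Note: the mac80211.h changes above replace the lock-based TXQ scheduling API with a round-based one: ieee80211_txq_schedule_start() opens a scheduling round, ieee80211_next_txq() hands out eligible queues, ieee80211_return_txq() (now taking a force argument) puts them back, and ieee80211_txq_schedule_end() is kept only as a no-op for compatibility. The code below is a rough driver-side sketch of how this API is meant to be used, not part of this diff; drv_schedule_ac() and drv_push_to_hw() are hypothetical placeholders.

#include <net/mac80211.h>

/* Hypothetical placeholder for the driver's hand-off of a frame to hardware. */
static void drv_push_to_hw(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
			   struct sk_buff *skb);

/* Illustrative only: one scheduling round for a single access category. */
static void drv_schedule_ac(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_txq *txq;

	ieee80211_txq_schedule_start(hw, ac);
	while ((txq = ieee80211_next_txq(hw, ac)) != NULL) {
		struct sk_buff *skb = ieee80211_tx_dequeue(hw, txq);

		if (skb)
			drv_push_to_hw(hw, txq, skb);

		/* force=false: reschedule only if mac80211 still has frames queued. */
		ieee80211_return_txq(hw, txq, false);
	}
	ieee80211_txq_schedule_end(hw, ac);
}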