Diffstat (limited to 'drivers')
315 files changed, 3407 insertions(+), 2407 deletions(-)
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index d4551e33fa71..5a7551d060f2 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -611,8 +611,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
 	/* Move to ITS specific data */
 	its = (struct acpi_iort_its_group *)node->node_data;
 
-	if (idx > its->its_count) {
-		dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
+	if (idx >= its->its_count) {
+		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
 			idx, its->its_count);
 		return -ENXIO;
 	}
@@ -1256,12 +1256,12 @@ static int __init arm_smmu_v3_set_proximity(struct device *dev,
 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
 
 	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
-		int node = acpi_map_pxm_to_node(smmu->pxm);
+		int dev_node = acpi_map_pxm_to_node(smmu->pxm);
 
-		if (node != NUMA_NO_NODE && !node_online(node))
+		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
 			return -EINVAL;
 
-		set_dev_node(dev, node);
+		set_dev_node(dev, dev_node);
 		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
 			smmu->base_address,
 			smmu->pxm);
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 28cffaaf9d82..f616b16c1f0b 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -232,13 +232,15 @@ int acpi_device_set_power(struct acpi_device *device, int state)
 		if (device->power.flags.power_resources)
 			result = acpi_power_transition(device, target_state);
 	} else {
+		int cur_state = device->power.state;
+
 		if (device->power.flags.power_resources) {
 			result = acpi_power_transition(device, ACPI_STATE_D0);
 			if (result)
 				goto end;
 		}
 
-		if (device->power.state == ACPI_STATE_D0) {
+		if (cur_state == ACPI_STATE_D0) {
 			int psc;
 
 			/* Nothing to do here if _PSC is not present. */
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index c02fa27dd3f3..1413324982f0 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1282,7 +1282,7 @@ static ssize_t hw_error_scrub_store(struct device *dev,
 	if (rc)
 		return rc;
 
-	device_lock(dev);
+	nfit_device_lock(dev);
 	nd_desc = dev_get_drvdata(dev);
 	if (nd_desc) {
 		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
@@ -1299,7 +1299,7 @@ static ssize_t hw_error_scrub_store(struct device *dev,
 			break;
 		}
 	}
-	device_unlock(dev);
+	nfit_device_unlock(dev);
 	if (rc)
 		return rc;
 	return size;
@@ -1319,7 +1319,7 @@ static ssize_t scrub_show(struct device *dev,
 	ssize_t rc = -ENXIO;
 	bool busy;
 
-	device_lock(dev);
+	nfit_device_lock(dev);
 	nd_desc = dev_get_drvdata(dev);
 	if (!nd_desc) {
 		device_unlock(dev);
@@ -1339,7 +1339,7 @@ static ssize_t scrub_show(struct device *dev,
 	}
 
 	mutex_unlock(&acpi_desc->init_mutex);
-	device_unlock(dev);
+	nfit_device_unlock(dev);
 	return rc;
 }
 
@@ -1356,14 +1356,14 @@ static ssize_t scrub_store(struct device *dev,
 	if (val != 1)
 		return -EINVAL;
 
-	device_lock(dev);
+	nfit_device_lock(dev);
 	nd_desc = dev_get_drvdata(dev);
 	if (nd_desc) {
 		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
 		rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
 	}
-	device_unlock(dev);
+	nfit_device_unlock(dev);
 	if (rc)
 		return rc;
 	return size;
@@ -1749,9 +1749,9 @@ static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
 	struct acpi_device *adev = data;
 	struct device *dev = &adev->dev;
 
-	device_lock(dev->parent);
+	nfit_device_lock(dev->parent);
 	__acpi_nvdimm_notify(dev, event);
-	device_unlock(dev->parent);
+	nfit_device_unlock(dev->parent);
 }
 
 static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
@@ -3457,8 +3457,8 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
 	struct device *dev = acpi_desc->dev;
 
 	/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
-	device_lock(dev);
-	device_unlock(dev);
+	nfit_device_lock(dev);
+	nfit_device_unlock(dev);
 
 	/* Bounce the init_mutex to complete initial registration */
 	mutex_lock(&acpi_desc->init_mutex);
@@ -3602,8 +3602,8 @@ void acpi_nfit_shutdown(void *data)
 	 * acpi_nfit_ars_rescan() submissions have had a chance to
 	 * either submit or see ->cancel set.
	 */
-	device_lock(bus_dev);
-	device_unlock(bus_dev);
+	nfit_device_lock(bus_dev);
+	nfit_device_unlock(bus_dev);
 
 	flush_workqueue(nfit_wq);
 }
@@ -3746,9 +3746,9 @@ EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
 
 static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
 {
-	device_lock(&adev->dev);
+	nfit_device_lock(&adev->dev);
 	__acpi_nfit_notify(&adev->dev, adev->handle, event);
-	device_unlock(&adev->dev);
+	nfit_device_unlock(&adev->dev);
 }
 
 static const struct acpi_device_id acpi_nfit_ids[] = {
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 6ee2b02af73e..24241941181c 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -312,6 +312,30 @@ static inline struct acpi_nfit_desc *to_acpi_desc(
 	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
 }
 
+#ifdef CONFIG_PROVE_LOCKING
+static inline void nfit_device_lock(struct device *dev)
+{
+	device_lock(dev);
+	mutex_lock(&dev->lockdep_mutex);
+}
+
+static inline void nfit_device_unlock(struct device *dev)
+{
+	mutex_unlock(&dev->lockdep_mutex);
+	device_unlock(dev);
+}
+#else
+static inline void nfit_device_lock(struct device *dev)
+{
+	device_lock(dev);
+}
+
+static inline void nfit_device_unlock(struct device *dev)
+{
+	device_unlock(dev);
+}
+#endif
+
 const guid_t *to_nfit_uuid(enum nfit_uuids id);
 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
 void acpi_nfit_shutdown(void *data);
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 1e7ac0bd0d3a..f31544d3656e 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -541,6 +541,44 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
 }
 
 /**
+ * check_acpi_cpu_flag() - Determine if CPU node has a flag set
+ * @cpu: Kernel logical CPU number
+ * @rev: The minimum PPTT revision defining the flag
+ * @flag: The flag itself
+ *
+ * Check the node representing a CPU for a given flag.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found or
+ *	   the table revision isn't new enough.
+ *	   1, any passed flag set
+ *	   0, flag unset
+ */
+static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
+{
+	struct acpi_table_header *table;
+	acpi_status status;
+	u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+	struct acpi_pptt_processor *cpu_node = NULL;
+	int ret = -ENOENT;
+
+	status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+	if (ACPI_FAILURE(status)) {
+		acpi_pptt_warn_missing();
+		return ret;
+	}
+
+	if (table->revision >= rev)
+		cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+
+	if (cpu_node)
+		ret = (cpu_node->flags & flag) != 0;
+
+	acpi_put_table(table);
+
+	return ret;
+}
+
+/**
  * acpi_find_last_cache_level() - Determines the number of cache levels for a PE
  * @cpu: Kernel logical CPU number
  *
@@ -605,6 +643,20 @@ int cache_setup_acpi(unsigned int cpu)
 }
 
 /**
+ * acpi_pptt_cpu_is_thread() - Determine if CPU is a thread
+ * @cpu: Kernel logical CPU number
+ *
+ * Return: 1, a thread
+ *         0, not a thread
+ *         -ENOENT ,if the PPTT doesn't exist, the CPU cannot be found or
+ *         the table revision isn't new enough.
+ */
+int acpi_pptt_cpu_is_thread(unsigned int cpu)
+{
+	return check_acpi_cpu_flag(cpu, 2, ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD);
+}
+
+/**
  * find_acpi_cpu_topology() - Determine a unique topology value for a given CPU
  * @cpu: Kernel logical CPU number
  * @level: The topological level for which we would like a unique ID
@@ -664,7 +716,6 @@ int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
 	return ret;
 }
 
-
 /**
  * find_acpi_cpu_topology_package() - Determine a unique CPU package value
  * @cpu: Kernel logical CPU number
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 0e28270b0fd8..aad6be5c0af0 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2204,6 +2204,12 @@ int __init acpi_scan_init(void)
 	acpi_gpe_apply_masked_gpes();
 	acpi_update_all_gpes();
 
+	/*
+	 * Although we call __add_memory() that is documented to require the
+	 * device_hotplug_lock, it is not necessary here because this is an
+	 * early code when userspace or any other code path cannot trigger
+	 * hotplug/hotunplug operations.
+	 */
 	mutex_lock(&acpi_scan_lock);
 	/*
 	 * Enumerate devices in the ACPI namespace.
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 38a59a630cd4..dc1c83eafc22 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2988,7 +2988,7 @@ static void binder_transaction(struct binder_proc *proc,
 			else
 				return_error = BR_DEAD_REPLY;
 			mutex_unlock(&context->context_mgr_node_lock);
-			if (target_node && target_proc == proc) {
+			if (target_node && target_proc->pid == proc->pid) {
 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
 						  proc->pid, thread->pid);
 				return_error = BR_FAILED_REPLY;
@@ -3239,7 +3239,8 @@ static void binder_transaction(struct binder_proc *proc,
 	buffer_offset = off_start_offset;
 	off_end_offset = off_start_offset + tr->offsets_size;
 	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
-	sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
+	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
+		ALIGN(secctx_sz, sizeof(u64));
 	off_min = 0;
 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
 	     buffer_offset += sizeof(binder_size_t)) {
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 72312ad2e142..9e9583a6bba9 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -338,6 +338,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
 		hpriv->phys[port] = NULL;
 		rc = 0;
 		break;
+	case -EPROBE_DEFER:
+		/* Do not complain yet */
+		break;
 
 	default:
 		dev_err(dev,
@@ -408,7 +411,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
 	hpriv->mmio = devm_ioremap_resource(dev,
 			      platform_get_resource(pdev, IORESOURCE_MEM, 0));
 	if (IS_ERR(hpriv->mmio)) {
-		dev_err(dev, "no mmio space\n");
 		rc = PTR_ERR(hpriv->mmio);
 		goto err_out;
 	}
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 173e6f2dd9af..eefda51f97d3 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
 	unsigned int ret;
 	struct rm_feature_desc *desc;
 	struct ata_taskfile tf;
-	static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
+	static const char cdb[ATAPI_CDB_LEN] = {  GPCMD_GET_CONFIGURATION,
 			2,      /* only 1 feature descriptor requested */
 			0, 3,   /* 3, removable medium feature */
 			0, 0, 0,/* reserved */
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index dc404492381d..28b92e3cc570 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -202,7 +202,7 @@ config GENERIC_ARCH_TOPOLOGY
 	help
 	  Enable support for architectures common topology code: e.g., parsing
 	  CPU capacity information from DT, usage of such information for
-	  appropriate scaling, sysfs interface for changing capacity values at
+	  appropriate scaling, sysfs interface for reading capacity values at
 	  runtime.
 
 endmenu
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 63c1e76739f1..b54d241a2ff5 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -15,6 +15,11 @@
 #include <linux/string.h>
 #include <linux/sched/topology.h>
 #include <linux/cpuset.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
 
 DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
 
@@ -241,3 +246,296 @@ static void parsing_done_workfn(struct work_struct *work)
 #else
 core_initcall(free_raw_capacity);
 #endif
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+static int __init get_cpu_for_node(struct device_node *node)
+{
+	struct device_node *cpu_node;
+	int cpu;
+
+	cpu_node = of_parse_phandle(node, "cpu", 0);
+	if (!cpu_node)
+		return -1;
+
+	cpu = of_cpu_node_to_id(cpu_node);
+	if (cpu >= 0)
+		topology_parse_cpu_capacity(cpu_node, cpu);
+	else
+		pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
+
+	of_node_put(cpu_node);
+	return cpu;
+}
+
+static int __init parse_core(struct device_node *core, int package_id,
+			     int core_id)
+{
+	char name[10];
+	bool leaf = true;
+	int i = 0;
+	int cpu;
+	struct device_node *t;
+
+	do {
+		snprintf(name, sizeof(name), "thread%d", i);
+		t = of_get_child_by_name(core, name);
+		if (t) {
+			leaf = false;
+			cpu = get_cpu_for_node(t);
+			if (cpu >= 0) {
+				cpu_topology[cpu].package_id = package_id;
+				cpu_topology[cpu].core_id = core_id;
+				cpu_topology[cpu].thread_id = i;
+			} else {
+				pr_err("%pOF: Can't get CPU for thread\n",
+				       t);
+				of_node_put(t);
+				return -EINVAL;
+			}
+			of_node_put(t);
+		}
+		i++;
+	} while (t);
+
+	cpu = get_cpu_for_node(core);
+	if (cpu >= 0) {
+		if (!leaf) {
+			pr_err("%pOF: Core has both threads and CPU\n",
+			       core);
+			return -EINVAL;
+		}
+
+		cpu_topology[cpu].package_id = package_id;
+		cpu_topology[cpu].core_id = core_id;
+	} else if (leaf) {
+		pr_err("%pOF: Can't get CPU for leaf core\n", core);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __init parse_cluster(struct device_node *cluster, int depth)
+{
+	char name[10];
+	bool leaf = true;
+	bool has_cores = false;
+	struct device_node *c;
+	static int package_id __initdata;
+	int core_id = 0;
+	int i, ret;
+
+	/*
+	 * First check for child clusters; we currently ignore any
+	 * information about the nesting of clusters and present the
+	 * scheduler with a flat list of them.
+	 */
+	i = 0;
+	do {
+		snprintf(name, sizeof(name), "cluster%d", i);
+		c = of_get_child_by_name(cluster, name);
+		if (c) {
+			leaf = false;
+			ret = parse_cluster(c, depth + 1);
+			of_node_put(c);
+			if (ret != 0)
+				return ret;
+		}
+		i++;
+	} while (c);
+
+	/* Now check for cores */
+	i = 0;
+	do {
+		snprintf(name, sizeof(name), "core%d", i);
+		c = of_get_child_by_name(cluster, name);
+		if (c) {
+			has_cores = true;
+
+			if (depth == 0) {
+				pr_err("%pOF: cpu-map children should be clusters\n",
+				       c);
+				of_node_put(c);
+				return -EINVAL;
+			}
+
+			if (leaf) {
+				ret = parse_core(c, package_id, core_id++);
+			} else {
+				pr_err("%pOF: Non-leaf cluster with core %s\n",
+				       cluster, name);
+				ret = -EINVAL;
+			}
+
+			of_node_put(c);
+			if (ret != 0)
+				return ret;
+		}
+		i++;
+	} while (c);
+
+	if (leaf && !has_cores)
+		pr_warn("%pOF: empty cluster\n", cluster);
+
+	if (leaf)
+		package_id++;
+
+	return 0;
+}
+
+static int __init parse_dt_topology(void)
+{
+	struct device_node *cn, *map;
+	int ret = 0;
+	int cpu;
+
+	cn = of_find_node_by_path("/cpus");
+	if (!cn) {
+		pr_err("No CPU information found in DT\n");
+		return 0;
+	}
+
+	/*
+	 * When topology is provided cpu-map is essentially a root
+	 * cluster with restricted subnodes.
+	 */
+	map = of_get_child_by_name(cn, "cpu-map");
+	if (!map)
+		goto out;
+
+	ret = parse_cluster(map, 0);
+	if (ret != 0)
+		goto out_map;
+
+	topology_normalize_cpu_scale();
+
+	/*
+	 * Check that all cores are in the topology; the SMP code will
+	 * only mark cores described in the DT as possible.
+	 */
+	for_each_possible_cpu(cpu)
+		if (cpu_topology[cpu].package_id == -1)
+			ret = -EINVAL;
+
+out_map:
+	of_node_put(map);
+out:
+	of_node_put(cn);
+	return ret;
+}
+#endif
+
+/*
+ * cpu topology table
+ */
+struct cpu_topology cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+const struct cpumask *cpu_coregroup_mask(int cpu)
+{
+	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
+
+	/* Find the smaller of NUMA, core or LLC siblings */
+	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
+		/* not numa in package, lets use the package siblings */
+		core_mask = &cpu_topology[cpu].core_sibling;
+	}
+	if (cpu_topology[cpu].llc_id != -1) {
+		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
+			core_mask = &cpu_topology[cpu].llc_sibling;
+	}
+
+	return core_mask;
+}
+
+void update_siblings_masks(unsigned int cpuid)
+{
+	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
+	int cpu;
+
+	/* update core and thread sibling masks */
+	for_each_online_cpu(cpu) {
+		cpu_topo = &cpu_topology[cpu];
+
+		if (cpuid_topo->llc_id == cpu_topo->llc_id) {
+			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
+			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
+		}
+
+		if (cpuid_topo->package_id != cpu_topo->package_id)
+			continue;
+
+		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+
+		if (cpuid_topo->core_id != cpu_topo->core_id)
+			continue;
+
+		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
+		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
+	}
+}
+
+static void clear_cpu_topology(int cpu)
+{
+	struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+	cpumask_clear(&cpu_topo->llc_sibling);
+	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
+
+	cpumask_clear(&cpu_topo->core_sibling);
+	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
+	cpumask_clear(&cpu_topo->thread_sibling);
+	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
+}
+
+void __init reset_cpu_topology(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+		cpu_topo->thread_id = -1;
+		cpu_topo->core_id = -1;
+		cpu_topo->package_id = -1;
+		cpu_topo->llc_id = -1;
+
+		clear_cpu_topology(cpu);
+	}
+}
+
+void remove_cpu_topology(unsigned int cpu)
+{
+	int sibling;
+
+	for_each_cpu(sibling, topology_core_cpumask(cpu))
+		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
+	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
+		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
+	for_each_cpu(sibling, topology_llc_cpumask(cpu))
+		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
+
+	clear_cpu_topology(cpu);
+}
+
+__weak int __init parse_acpi_topology(void)
+{
+	return 0;
+}
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+void __init init_cpu_topology(void)
+{
+	reset_cpu_topology();
+
+	/*
+	 * Discard anything that was parsed if we hit an error so we
+	 * don't use partial information.
+	 */
+	if (parse_acpi_topology())
+		reset_cpu_topology();
+	else if (of_have_populated_dt() && parse_dt_topology())
+		reset_cpu_topology();
+}
+#endif
diff --git a/drivers/base/core.c b/drivers/base/core.c
index da84a73f2ba6..636058bbf48a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1663,6 +1663,9 @@ void device_initialize(struct device *dev)
 	kobject_init(&dev->kobj, &device_ktype);
 	INIT_LIST_HEAD(&dev->dma_pools);
 	mutex_init(&dev->mutex);
+#ifdef CONFIG_PROVE_LOCKING
+	mutex_init(&dev->lockdep_mutex);
+#endif
 	lockdep_set_novalidate_class(&dev->mutex);
 	spin_lock_init(&dev->devres_lock);
 	INIT_LIST_HEAD(&dev->devres_head);
@@ -2211,6 +2214,24 @@ void put_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(put_device);
 
+bool kill_device(struct device *dev)
+{
+	/*
+	 * Require the device lock and set the "dead" flag to guarantee that
+	 * the update behavior is consistent with the other bitfields near
+	 * it and that we cannot have an asynchronous probe routine trying
+	 * to run while we are tearing out the bus/class/sysfs from
+	 * underneath the device.
+	 */
+	lockdep_assert_held(&dev->mutex);
+
+	if (dev->p->dead)
+		return false;
+	dev->p->dead = true;
+	return true;
+}
+EXPORT_SYMBOL_GPL(kill_device);
+
 /**
  * device_del - delete device from system.
  * @dev: device.
@@ -2230,15 +2251,8 @@ void device_del(struct device *dev)
 	struct kobject *glue_dir = NULL;
 	struct class_interface *class_intf;
 
-	/*
-	 * Hold the device lock and set the "dead" flag to guarantee that
-	 * the update behavior is consistent with the other bitfields near
-	 * it and that we cannot have an asynchronous probe routine trying
-	 * to run while we are tearing out the bus/class/sysfs from
-	 * underneath the device.
-	 */
 	device_lock(dev);
-	dev->p->dead = true;
+	kill_device(dev);
 	device_unlock(dev);
 
 	/* Notify clients of device removal.  This call must come
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 7048a41973ed..7ecd590e67fe 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -141,8 +141,8 @@ int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed);
 int fw_map_paged_buf(struct fw_priv *fw_priv);
 #else
 static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {}
-int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
-int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
+static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
+static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
 #endif
 
 #endif /* __FIRMWARE_LOADER_H */
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 85f20e371f2f..bd7d3bb8b890 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1726,6 +1726,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
 		/* MSch: invalidate default_params */
 		default_params[drive].blocks  = 0;
 		set_capacity(floppy->disk, MAX_DISK_SIZE * 2);
+		/* Fall through */
 	case FDFMTEND:
 	case FDFLUSH:
 		/* invalidate the buffer track to force a reread */
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 90ebfcae0ce6..2b3103c30857 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -5417,7 +5417,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
 	unsigned int key_len;
 	char secret[SHARED_SECRET_MAX]; /* 64 byte */
 	unsigned int resp_size;
-	SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
+	struct shash_desc *desc;
 	struct packet_info pi;
 	struct net_conf *nc;
 	int err, rv;
@@ -5430,6 +5430,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
 	memcpy(secret, nc->shared_secret, key_len);
 	rcu_read_unlock();
 
+	desc = kmalloc(sizeof(struct shash_desc) +
+		       crypto_shash_descsize(connection->cram_hmac_tfm),
+		       GFP_KERNEL);
+	if (!desc) {
+		rv = -1;
+		goto fail;
+	}
 	desc->tfm = connection->cram_hmac_tfm;
 
 	rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
@@ -5571,7 +5578,10 @@ static int drbd_do_auth(struct drbd_connection *connection)
 	kfree(peers_ch);
 	kfree(response);
 	kfree(right_response);
-	shash_desc_zero(desc);
+	if (desc) {
+		shash_desc_zero(desc);
+		kfree(desc);
+	}
 
 	return rv;
 }
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 44c9985f352a..3036883fc9f8 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -924,6 +924,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	struct file	*file;
 	struct inode	*inode;
 	struct address_space *mapping;
+	struct block_device *claimed_bdev = NULL;
 	int		lo_flags = 0;
 	int		error;
 	loff_t		size;
@@ -942,10 +943,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	 * here to avoid changing device under exclusive owner.
	 */
 	if (!(mode & FMODE_EXCL)) {
-		bdgrab(bdev);
-		error = blkdev_get(bdev, mode | FMODE_EXCL, loop_set_fd);
-		if (error)
+		claimed_bdev = bd_start_claiming(bdev, loop_set_fd);
+		if (IS_ERR(claimed_bdev)) {
+			error = PTR_ERR(claimed_bdev);
 			goto out_putf;
+		}
 	}
 
 	error = mutex_lock_killable(&loop_ctl_mutex);
@@ -1015,15 +1017,15 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	mutex_unlock(&loop_ctl_mutex);
 	if (partscan)
 		loop_reread_partitions(lo, bdev);
-	if (!(mode & FMODE_EXCL))
-		blkdev_put(bdev, mode | FMODE_EXCL);
+	if (claimed_bdev)
+		bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
 	return 0;
 
out_unlock:
 	mutex_unlock(&loop_ctl_mutex);
out_bdev:
-	if (!(mode & FMODE_EXCL))
-		blkdev_put(bdev, mode | FMODE_EXCL);
+	if (claimed_bdev)
+		bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
out_putf:
 	fput(file);
out:
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9bcde2325893..e21d2ded732b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1231,7 +1231,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
 				 struct block_device *bdev)
 {
 	sock_shutdown(nbd);
-	kill_bdev(bdev);
+	__invalidate_device(bdev, true);
 	nbd_bdev_reset(bdev);
 	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
 			       &nbd->config->runtime_flags))
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index a55be205b91a..dbfe34664633 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -98,6 +98,9 @@ static int ath_open(struct hci_uart *hu)
 
 	BT_DBG("hu %p", hu);
 
+	if (!hci_uart_has_flow_control(hu))
+		return -EOPNOTSUPP;
+
 	ath = kzalloc(sizeof(*ath), GFP_KERNEL);
 	if (!ath)
 		return -ENOMEM;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 8905ad2edde7..ae2624fce913 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -406,6 +406,9 @@ static int bcm_open(struct hci_uart *hu)
 
 	bt_dev_dbg(hu->hdev, "hu %p", hu);
 
+	if (!hci_uart_has_flow_control(hu))
+		return -EOPNOTSUPP;
+
 	bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
 	if (!bcm)
 		return -ENOMEM;
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 207bae5e0d46..31f25153087d 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -391,6 +391,9 @@ static int intel_open(struct hci_uart *hu)
 
 	BT_DBG("hu %p", hu);
 
+	if (!hci_uart_has_flow_control(hu))
+		return -EOPNOTSUPP;
+
 	intel = kzalloc(sizeof(*intel), GFP_KERNEL);
 	if (!intel)
 		return -ENOMEM;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 8950e07889fe..85a30fb9177b 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -292,6 +292,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 	return 0;
 }
 
+/* Check the underlying device or tty has flow control support */
+bool hci_uart_has_flow_control(struct hci_uart *hu)
+{
+	/* serdev nodes check if the needed operations are present */
+	if (hu->serdev)
+		return true;
+
+	if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
+		return true;
+
+	return false;
+}
+
 /* Flow control or un-flow control the device */
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
 {
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index f98e5cc343b2..fbc3f7c3a5c7 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -59,6 +59,9 @@ static int mrvl_open(struct hci_uart *hu)
 
 	BT_DBG("hu %p", hu);
 
+	if (!hci_uart_has_flow_control(hu))
+		return -EOPNOTSUPP;
+
 	mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
 	if (!mrvl)
 		return -ENOMEM;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 9a5c9c1f9484..82a0a3691a63 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -473,6 +473,9 @@ static int qca_open(struct hci_uart *hu)
 
 	BT_DBG("hu %p qca_open", hu);
 
+	if (!hci_uart_has_flow_control(hu))
+		return -EOPNOTSUPP;
+
 	qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
 	if (!qca)
 		return -ENOMEM;
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index f11af3912ce6..6ab631101019 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -104,6 +104,7 @@ int hci_uart_wait_until_sent(struct hci_uart *hu);
 int hci_uart_init_ready(struct hci_uart *hu);
 void hci_uart_init_work(struct work_struct *work);
 void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
+bool hci_uart_has_flow_control(struct hci_uart *hu);
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
 void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
 			 unsigned int oper_speed);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3e866885a405..2794f4b3f62d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -573,3 +573,12 @@ config RANDOM_TRUST_CPU
 	has not installed a hidden back door to compromise the CPU's
 	random number generation facilities. This can also be configured
 	at boot with "random.trust_cpu=on/off".
+
+config RANDOM_TRUST_BOOTLOADER
+	bool "Trust the bootloader to initialize Linux's CRNG"
+	help
+	Some bootloaders can provide entropy to increase the kernel's initial
+	device randomness. Say Y here to assume the entropy provided by the
+	booloader is trustworthy so it will be added to the kernel's entropy
+	pool. Otherwise, say N here so it will be regarded as device input that
+	only mixes the entropy pool.
\ No newline at end of file
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 5c39f20378b8..9ac6671bb514 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -567,8 +567,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
 	unsigned long long m;
 
 	m = hpets->hp_tick_freq + (dis >> 1);
-	do_div(m, dis);
-	return (unsigned long)m;
+	return div64_ul(m, dis);
 }
 
 static int
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 57204335c5f5..285e0b8f9a97 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -76,7 +76,7 @@ static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count,
 	struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
 	struct ipmb_request_elem *queue_elem;
 	struct ipmb_msg msg;
-	ssize_t ret;
+	ssize_t ret = 0;
 
 	memset(&msg, 0, sizeof(msg));
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 5d5ea4ce1442..566922df4b7b 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2445,3 +2445,17 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
 	credit_entropy_bits(poolp, entropy);
 }
 EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
+
+/* Handle random seed passed by bootloader.
+ * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise
+ * it would be regarded as device data.
+ * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
+ */
+void add_bootloader_randomness(const void *buf, unsigned int size)
+{
+	if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
+		add_hwgenerator_randomness(buf, size, size * 8);
+	else
+		add_device_randomness(buf, size);
+}
+EXPORT_SYMBOL_GPL(add_bootloader_randomness);
\ No newline at end of file
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index d47ad10a35fe..4838c6a9f0f2 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -77,6 +77,18 @@ static int tpm_go_idle(struct tpm_chip *chip)
 	return chip->ops->go_idle(chip);
 }
 
+static void tpm_clk_enable(struct tpm_chip *chip)
+{
+	if (chip->ops->clk_enable)
+		chip->ops->clk_enable(chip, true);
+}
+
+static void tpm_clk_disable(struct tpm_chip *chip)
+{
+	if (chip->ops->clk_enable)
+		chip->ops->clk_enable(chip, false);
+}
+
 /**
  * tpm_chip_start() - power on the TPM
  * @chip: a TPM chip to use
@@ -89,13 +101,12 @@ int tpm_chip_start(struct tpm_chip *chip)
 {
 	int ret;
 
-	if (chip->ops->clk_enable)
-		chip->ops->clk_enable(chip, true);
+	tpm_clk_enable(chip);
 
 	if (chip->locality == -1) {
 		ret = tpm_request_locality(chip);
 		if (ret) {
-			chip->ops->clk_enable(chip, false);
+			tpm_clk_disable(chip);
 			return ret;
 		}
 	}
@@ -103,8 +114,7 @@ int tpm_chip_start(struct tpm_chip *chip)
 	ret = tpm_cmd_ready(chip);
 	if (ret) {
 		tpm_relinquish_locality(chip);
-		if (chip->ops->clk_enable)
-			chip->ops->clk_enable(chip, false);
+		tpm_clk_disable(chip);
 		return ret;
 	}
 
@@ -124,8 +134,7 @@ void tpm_chip_stop(struct tpm_chip *chip)
 {
 	tpm_go_idle(chip);
 	tpm_relinquish_locality(chip);
-	if (chip->ops->clk_enable)
-		chip->ops->clk_enable(chip, false);
+	tpm_clk_disable(chip);
 }
 EXPORT_SYMBOL_GPL(tpm_chip_stop);
 
@@ -545,6 +554,20 @@ static int tpm_add_hwrng(struct tpm_chip *chip)
 	return hwrng_register(&chip->hwrng);
 }
 
+static int tpm_get_pcr_allocation(struct tpm_chip *chip)
+{
+	int rc;
+
+	rc = (chip->flags & TPM_CHIP_FLAG_TPM2) ?
+	     tpm2_get_pcr_allocation(chip) :
+	     tpm1_get_pcr_allocation(chip);
+
+	if (rc > 0)
+		return -ENODEV;
+
+	return rc;
+}
+
 /*
  * tpm_chip_register() - create a character device for the TPM chip
  * @chip: TPM chip to use.
@@ -564,6 +587,12 @@ int tpm_chip_register(struct tpm_chip *chip)
 	if (rc)
 		return rc;
 	rc = tpm_auto_startup(chip);
+	if (rc) {
+		tpm_chip_stop(chip);
+		return rc;
+	}
+
+	rc = tpm_get_pcr_allocation(chip);
 	tpm_chip_stop(chip);
 	if (rc)
 		return rc;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index e503ffc3aa39..a7fea3e0ca86 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -394,6 +394,7 @@ int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf);
 ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
 		    const char *desc, size_t min_cap_length);
 int tpm1_get_random(struct tpm_chip *chip, u8 *out, size_t max);
+int tpm1_get_pcr_allocation(struct tpm_chip *chip);
 unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
 int tpm_pm_suspend(struct device *dev);
 int tpm_pm_resume(struct device *dev);
@@ -449,6 +450,7 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
 ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
 			u32 *value, const char *desc);
 
+ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip);
 int tpm2_auto_startup(struct tpm_chip *chip);
 void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
 unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index faacbe1ffa1a..149e953ca369 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -699,18 +699,6 @@ int tpm1_auto_startup(struct tpm_chip *chip)
 		goto out;
 	}
 
-	chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
-					GFP_KERNEL);
-	if (!chip->allocated_banks) {
-		rc = -ENOMEM;
-		goto out;
-	}
-
-	chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
-	chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
-	chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
-	chip->nr_allocated_banks = 1;
-
 	return rc;
 out:
 	if (rc > 0)
@@ -779,3 +767,27 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr)
 	return rc;
 }
 
+/**
+ * tpm1_get_pcr_allocation() - initialize the allocated bank
+ * @chip: TPM chip to use.
+ *
+ * The function initializes the SHA1 allocated bank to extend PCR
+ *
+ * Return:
+ * * 0 on success,
+ * * < 0 on error.
+ */
+int tpm1_get_pcr_allocation(struct tpm_chip *chip)
+{
+	chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
+					GFP_KERNEL);
+	if (!chip->allocated_banks)
+		return -ENOMEM;
+
+	chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
+	chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
+	chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
+	chip->nr_allocated_banks = 1;
+
+	return 0;
+}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index d103545e4055..ba9acae83bff 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -840,7 +840,7 @@ struct tpm2_pcr_selection {
 	u8  pcr_select[3];
 } __packed;
 
-static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
+ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
 {
 	struct tpm2_pcr_selection pcr_selection;
 	struct tpm_buf buf;
@@ -1040,10 +1040,6 @@ int tpm2_auto_startup(struct tpm_chip *chip)
 		goto out;
 	}
 
-	rc = tpm2_get_pcr_allocation(chip);
-	if (rc)
-		goto out;
-
 	rc = tpm2_get_cc_attrs_tbl(chip);
 
 out:
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 44db83a6d01c..44a46dcc0518 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -141,6 +141,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
 			continue;
 
 		div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
+		if (div > GENERATED_MAX_DIV + 1)
+			div = GENERATED_MAX_DIV + 1;
 
 		clk_generated_best_diff(req, parent, parent_rate, div,
 					&best_diff, &best_rate);
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 1aa5f4059251..73b7e238eee7 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -25,9 +25,11 @@ static const struct mtk_fixed_clk top_fixed_clks[] = {
 	FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000),
 };
 
+static const struct mtk_fixed_factor top_early_divs[] = {
+	FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, 2),
+};
+
 static const struct mtk_fixed_factor top_divs[] = {
-	FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1,
-		2),
 	FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1,
 		2),
 	FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1,
@@ -1148,37 +1150,57 @@ static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
 	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
 }
 
+static struct clk_onecell_data *top_clk_data;
+
+static void clk_mt8183_top_init_early(struct device_node *node)
+{
+	int i;
+
+	top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+	for (i = 0; i < CLK_TOP_NR_CLK; i++)
+		top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+
+	mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+				 top_clk_data);
+
+	of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+}
+
+CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
+		      clk_mt8183_top_init_early);
+
 static int clk_mt8183_top_probe(struct platform_device *pdev)
 {
 	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	void __iomem *base;
-	struct clk_onecell_data *clk_data;
 	struct device_node *node = pdev->dev.of_node;
 
 	base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
-	clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-
 	mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
-				    clk_data);
+				    top_clk_data);
+
+	mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+				 top_clk_data);
 
-	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
 
 	mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
-			       node, &mt8183_clk_lock, clk_data);
+			       node, &mt8183_clk_lock, top_clk_data);
 
 	mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
-				    base, &mt8183_clk_lock, clk_data);
+				    base, &mt8183_clk_lock, top_clk_data);
 
 	mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
-				    base, &mt8183_clk_lock, clk_data);
+				    base, &mt8183_clk_lock, top_clk_data);
 
 	mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
-			       clk_data);
+			       top_clk_data);
 
-	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+	return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
 }
 
 static int clk_mt8183_infra_probe(struct platform_device *pdev)
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 52bbb9ce3807..d4075b130674 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -572,17 +572,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
 	unsigned int reg = id / 32;
 	unsigned int bit = id % 32;
 	u32 bitmask = BIT(bit);
-	unsigned long flags;
-	u32 value;
 
 	dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
 
 	/* Reset module */
-	spin_lock_irqsave(&priv->rmw_lock, flags);
-	value = readl(priv->base + SRCR(reg));
-	value |= bitmask;
-	writel(value, priv->base + SRCR(reg));
-	spin_unlock_irqrestore(&priv->rmw_lock, flags);
+	writel(bitmask, priv->base + SRCR(reg));
 
 	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
 	udelay(35);
@@ -599,16 +593,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
 	unsigned int reg = id / 32;
 	unsigned int bit = id % 32;
 	u32 bitmask = BIT(bit);
-	unsigned long flags;
-	u32 value;
 
 	dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
 
-	spin_lock_irqsave(&priv->rmw_lock, flags);
-	value = readl(priv->base + SRCR(reg));
-	value |= bitmask;
-	writel(value, priv->base + SRCR(reg));
-	spin_unlock_irqrestore(&priv->rmw_lock, flags);
+	writel(bitmask, priv->base + SRCR(reg));
 
 	return 0;
 }
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index 91d3d721c801..3c219af25100 100644
--- a/drivers/clk/sprd/Kconfig
+++ b/drivers/clk/sprd/Kconfig
@@ -3,6 +3,7 @@ config SPRD_COMMON_CLK
 	tristate "Clock support for Spreadtrum SoCs"
 	depends on ARCH_SPRD || COMPILE_TEST
 	default ARCH_SPRD
+	select REGMAP_MMIO
 
 if SPRD_COMMON_CLK
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 23553ed6b548..2d22d6bf52f2 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -248,16 +248,12 @@ static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static struct cn_dev cdev = {
-	.input   = cn_rx_skb,
-};
-
 static int cn_init(void)
 {
 	struct cn_dev *dev = &cdev;
 	struct netlink_kernel_cfg cfg = {
 		.groups	= CN_NETLINK_USERS + 0xf,
-		.input	= dev->input,
+		.input	= cn_rx_skb,
 	};
 
 	dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 93f39a1d4c3d..c66f566a854c 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -131,10 +131,18 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	int err = -ENODEV;
 
 	cpu = of_get_cpu_node(policy->cpu, NULL);
+	if (!cpu)
+		goto out;
 
+	max_freqp = of_get_property(cpu, "clock-frequency", NULL);
 	of_node_put(cpu);
-	if (!cpu)
+	if (!max_freqp) {
+		err = -EINVAL;
 		goto out;
+	}
+
+	/* we need the freq in kHz */
+	max_freq = *max_freqp / 1000;
 
 	dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
 	if (!dn)
@@ -171,16 +179,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	}
 
 	pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
-	max_freqp = of_get_property(cpu, "clock-frequency", NULL);
-	if (!max_freqp) {
-		err = -EINVAL;
-		goto out_unmap_sdcpwr;
-	}
-
-	/* we need the freq in kHz */
-	max_freq = *max_freqp / 1000;
-
 	pr_debug("max clock-frequency is at %u kHz\n", max_freq);
 	pr_debug("initializing frequency table\n");
 
@@ -199,9 +197,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
 	return 0;
 
-out_unmap_sdcpwr:
-	iounmap(sdcpwr_mapbase);
-
out_unmap_sdcasr:
 	iounmap(sdcasr_mapbase);
out:
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 48cb3d4bb7d1..d8530475493c 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -13,6 +13,16 @@ config ARM_CPUIDLE
 	  initialized by calling the CPU operations init idle hook
 	  provided by architecture code.
 
+config ARM_PSCI_CPUIDLE
+	bool "PSCI CPU idle Driver"
+	depends on ARM_PSCI_FW
+	select DT_IDLE_STATES
+	select CPU_IDLE_MULTIPLE_DRIVERS
+	help
+	  Select this to enable PSCI firmware based CPUidle driver for ARM.
+	  It provides an idle driver that is capable of detecting and
+	  managing idle states through the PSCI firmware interface.
+
 config ARM_BIG_LITTLE_CPUIDLE
 	bool "Support for ARM big.LITTLE processors"
 	depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 9d7176cee3d3..40d016339b29 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE)         += cpuidle-ux500.o
 obj-$(CONFIG_ARM_AT91_CPUIDLE)          += cpuidle-at91.o
 obj-$(CONFIG_ARM_EXYNOS_CPUIDLE)        += cpuidle-exynos.o
 obj-$(CONFIG_ARM_CPUIDLE)		+= cpuidle-arm.o
+obj-$(CONFIG_ARM_PSCI_CPUIDLE)		+= cpuidle-psci.o
 
 ###############################################################################
 # MIPS drivers
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 5bcd82c35dcf..9e5156d39627 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -15,7 +15,6 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/slab.h>
-#include <linux/topology.h>
 
 #include <asm/cpuidle.h>
 
@@ -106,11 +105,17 @@ static int __init arm_idle_init_cpu(int cpu)
 	ret = arm_cpuidle_init(cpu);
 
 	/*
-	 * Allow the initialization to continue for other CPUs, if the reported
-	 * failure is a HW misconfiguration/breakage (-ENXIO).
+	 * Allow the initialization to continue for other CPUs, if the
+	 * reported failure is a HW misconfiguration/breakage (-ENXIO).
+	 *
+	 * Some platforms do not support idle operations
+	 * (arm_cpuidle_init() returning -EOPNOTSUPP), we should
+	 * not flag this case as an error, it is a valid
+	 * configuration.
	 */
 	if (ret) {
-		pr_err("CPU %d failed to init idle CPU ops\n", cpu);
+		if (ret != -EOPNOTSUPP)
+			pr_err("CPU %d failed to init idle CPU ops\n", cpu);
 		ret = ret == -ENXIO ? 0 : ret;
 		goto out_kfree_drv;
 	}
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
new file mode 100644
index 000000000000..f3c1a2396f98
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PSCI CPU idle driver.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ */
+
+#define pr_fmt(fmt) "CPUidle PSCI: " fmt
+
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/psci.h>
+#include <linux/slab.h>
+
+#include <asm/cpuidle.h>
+
+#include "dt_idle_states.h"
+
+static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+
+static int psci_enter_idle_state(struct cpuidle_device *dev,
+				struct cpuidle_driver *drv, int idx)
+{
+	u32 *state = __this_cpu_read(psci_power_state);
+
+	return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
+					   idx, state[idx - 1]);
+}
+
+static struct cpuidle_driver psci_idle_driver __initdata = {
+	.name = "psci_idle",
+	.owner = THIS_MODULE,
+	/*
+	 * PSCI idle states relies on architectural WFI to
+	 * be represented as state index 0.
+	 */
+	.states[0] = {
+		.enter                  = psci_enter_idle_state,
+		.exit_latency           = 1,
+		.target_residency       = 1,
+		.power_usage		= UINT_MAX,
+		.name                   = "WFI",
+		.desc                   = "ARM WFI",
+	}
+};
+
+static const struct of_device_id psci_idle_state_match[] __initconst = {
+	{ .compatible = "arm,idle-state",
+	  .data = psci_enter_idle_state },
+	{ },
+};
+
+static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
+{
+	int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
+
+	if (err) {
+		pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
+		return err;
+	}
+
+	if (!psci_power_state_is_valid(*state)) {
+		pr_warn("Invalid PSCI power state %#x\n", *state);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+{
+	int i, ret = 0, count = 0;
+	u32 *psci_states;
+	struct device_node *state_node;
+
+	/* Count idle states */
+	while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+					      count))) {
+		count++;
+		of_node_put(state_node);
+	}
+
+	if (!count)
+		return -ENODEV;
+
+	psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+	if (!psci_states)
+		return -ENOMEM;
+
+	for (i = 0; i < count; i++) {
+		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+		ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
+		of_node_put(state_node);
+
+		if (ret)
+			goto free_mem;
+
+		pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
+	}
+
+	/* Idle states parsed correctly, initialize per-cpu pointer */
+	per_cpu(psci_power_state, cpu) = psci_states;
+	return 0;
+
+free_mem:
+	kfree(psci_states);
+	return ret;
+}
+
+static __init int psci_cpu_init_idle(unsigned int cpu)
+{
+	struct device_node *cpu_node;
+	int ret;
+
+	/*
+	 * If the PSCI cpu_suspend function hook has not been initialized
+	 * idle states must not be enabled, so bail out
+	 */
+	if (!psci_ops.cpu_suspend)
+		return -EOPNOTSUPP;
+
+	cpu_node = of_cpu_device_node_get(cpu);
+	if (!cpu_node)
+		return -ENODEV;
+
+	ret = psci_dt_cpu_init_idle(cpu_node, cpu);
+
+	of_node_put(cpu_node);
+
+	return ret;
+}
+
+static int __init psci_idle_init_cpu(int cpu)
+{
+	struct cpuidle_driver *drv;
+	struct device_node *cpu_node;
+	const char *enable_method;
+	int ret = 0;
+
+	cpu_node = of_cpu_device_node_get(cpu);
+	if (!cpu_node)
+		return -ENODEV;
+
+	/*
+	 * Check whether the enable-method for the cpu is PSCI, fail
+	 * if it is not.
+	 */
+	enable_method = of_get_property(cpu_node, "enable-method", NULL);
+	if (!enable_method || (strcmp(enable_method, "psci")))
+		ret = -ENODEV;
+
+	of_node_put(cpu_node);
+	if (ret)
+		return ret;
+
+	drv = kmemdup(&psci_idle_driver, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return -ENOMEM;
+
+	drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+	/*
+	 * Initialize idle states data, starting at index 1, since
+	 * by default idle state 0 is the quiescent state reached
+	 * by the cpu by executing the wfi instruction.
+	 *
+	 * If no DT idle states are detected (ret == 0) let the driver
+	 * initialization fail accordingly since there is no reason to
+	 * initialize the idle driver if only wfi is supported, the
+	 * default archictectural back-end already executes wfi
+	 * on idle entry.
+	 */
+	ret = dt_init_idle_driver(drv, psci_idle_state_match, 1);
+	if (ret <= 0) {
+		ret = ret ? : -ENODEV;
+		goto out_kfree_drv;
+	}
+
+	/*
+	 * Initialize PSCI idle states.
+	 */
+	ret = psci_cpu_init_idle(cpu);
+	if (ret) {
+		pr_err("CPU %d failed to PSCI idle\n", cpu);
+		goto out_kfree_drv;
+	}
+
+	ret = cpuidle_register(drv, NULL);
+	if (ret)
+		goto out_kfree_drv;
+
+	return 0;
+
+out_kfree_drv:
+	kfree(drv);
+	return ret;
+}
+
+/*
+ * psci_idle_init - Initializes PSCI cpuidle driver
+ *
+ * Initializes PSCI cpuidle driver for all CPUs, if any CPU fails
+ * to register cpuidle driver then rollback to cancel all CPUs
+ * registration.
+ */
+static int __init psci_idle_init(void)
+{
+	int cpu, ret;
+	struct cpuidle_driver *drv;
+	struct cpuidle_device *dev;
+
+	for_each_possible_cpu(cpu) {
+		ret = psci_idle_init_cpu(cpu);
+		if (ret)
+			goto out_fail;
+	}
+
+	return 0;
+
+out_fail:
+	while (--cpu >= 0) {
+		dev = per_cpu(cpuidle_devices, cpu);
+		drv = cpuidle_get_cpu_driver(dev);
+		cpuidle_unregister(drv);
+		kfree(drv);
+	}
+
+	return ret;
+}
+device_initcall(psci_idle_init);
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 3dc1cbf849db..b785e936244f 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -957,7 +957,7 @@ static void set_broadcast_channel(struct fw_device *device, int generation)
 				device->bc_implemented = BC_IMPLEMENTED;
 				break;
 			}
-			/* else fall through to case address error */
+			/* else, fall through - to case address error */
 		case RCODE_ADDRESS_ERROR:
 			device->bc_implemented = BC_UNIMPLEMENTED;
 		}
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 42566b7be8f5..df8a56a979b9 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -284,7 +284,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 			if ((data[0] & bit) == (data[1] & bit))
 				continue;
 
-			/* 1394-1995 IRM, fall through to retry. */
+			/* fall through - It's a 1394-1995 IRM, retry. */
 		default:
 			if (retry) {
 				retry--;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 46bd22dde535..94a13fca8267 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -54,6 +54,7 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
 		switch (port_type) {
 		case SELFID_PORT_CHILD:
 			(*child_port_count)++;
+			/* fall through */
 		case SELFID_PORT_PARENT:
 		case SELFID_PORT_NCONN:
 			(*total_port_count)++;
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 53446e39a32c..ba8d3d0ef32c 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -157,7 +157,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
 
 config ISCSI_IBFT_FIND
 	bool "iSCSI Boot Firmware Table Attributes"
-	depends on X86 && ACPI
+	depends on X86 && ISCSI_IBFT
 	default n
 	help
 	  This option enables the kernel to find the region of memory
@@ -168,7 +168,8 @@ config ISCSI_IBFT_FIND
 config ISCSI_IBFT
 	tristate "iSCSI Boot Firmware Table Attributes module"
 	select ISCSI_BOOT_SYSFS
-	depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
+	select ISCSI_IBFT_FIND if X86
+	depends on ACPI && SCSI && SCSI_LOWLEVEL
 	default n
 	help
 	  This option enables support for detection and exposing of iSCSI
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index ab3aa3983833..7e12cbdf957c 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -84,6 +84,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IBFT_ISCSI_VERSION);
 
+#ifndef CONFIG_ISCSI_IBFT_FIND
+struct acpi_table_ibft *ibft_addr;
+#endif
+
 struct ibft_hdr {
 	u8 id;
 	u8 version;
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index f82ccd39a913..84f4ff351c62 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -103,7 +103,7 @@ static inline bool psci_power_state_loses_context(u32 state)
 	return state & mask;
 }
 
-static inline bool psci_power_state_is_valid(u32 state)
+bool psci_power_state_is_valid(u32 state)
 {
 	const u32 valid_mask = psci_has_ext_power_state() ?
			       PSCI_1_0_EXT_POWER_STATE_MASK :
@@ -277,175 +277,24 @@ static int __init psci_features(u32 psci_func_id)
 }
 
 #ifdef CONFIG_CPU_IDLE
-static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
-
-static int psci_dt_parse_state_node(struct device_node *np, u32 *state)
-{
-	int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
-
-	if (err) {
-		pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
-		return err;
-	}
-
-	if (!psci_power_state_is_valid(*state)) {
-		pr_warn("Invalid PSCI power state %#x\n", *state);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
-{
-	int i, ret = 0, count = 0;
-	u32 *psci_states;
-	struct device_node *state_node;
-
-	/* Count idle states */
-	while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
-					      count))) {
-		count++;
-		of_node_put(state_node);
-	}
-
-	if (!count)
-		return -ENODEV;
-
-	psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
-	if (!psci_states)
-		return -ENOMEM;
-
-	for (i = 0; i < count; i++) {
-		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
-		ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
-		of_node_put(state_node);
-
-		if (ret)
-			goto free_mem;
-
-		pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
-	}
-
-	/* Idle states parsed correctly, initialize per-cpu pointer */
-	per_cpu(psci_power_state, cpu) = psci_states;
-	return 0;
-
-free_mem:
-	kfree(psci_states);
-	return ret;
-}
-
-#ifdef CONFIG_ACPI
-#include <acpi/processor.h>
-
-static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
-{
-	int i, count;
-	u32 *psci_states;
-	struct acpi_lpi_state *lpi;
-	struct acpi_processor *pr = per_cpu(processors, cpu);
-
-	if (unlikely(!pr || !pr->flags.has_lpi))
-		return -EINVAL;
-
-	count = pr->power.count - 1;
-	if (count <= 0)
-		return -ENODEV;
-
-	psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
-	if (!psci_states)
-		return -ENOMEM;
-
-	for (i = 0; i < count; i++) {
-		u32 state;
-
-		lpi = &pr->power.lpi_states[i + 1];
-		/*
-		 * Only bits[31:0] represent a PSCI power_state while
-		 * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
-		 */
-		state = lpi->address;
-		if (!psci_power_state_is_valid(state)) {
-			pr_warn("Invalid PSCI power state %#x\n", state);
-			kfree(psci_states);
-			return -EINVAL;
-		}
-		psci_states[i] = state;
-	}
-	/* Idle states parsed correctly, initialize per-cpu pointer */
-	per_cpu(psci_power_state, cpu) = psci_states;
-	return 0;
-}
-#else
-static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
-{
-	return -EINVAL;
-}
-#endif
-
-int psci_cpu_init_idle(unsigned int cpu)
-{
-	struct device_node *cpu_node;
-	int ret;
-
-	/*
-	 * If the PSCI cpu_suspend function hook has not been initialized
-	 * idle states must not be enabled, so bail out
-	 */
-	if (!psci_ops.cpu_suspend)
-		return -EOPNOTSUPP;
-
-	if (!acpi_disabled)
-		return psci_acpi_cpu_init_idle(cpu);
-
-	cpu_node = of_get_cpu_node(cpu, NULL);
-	if (!cpu_node)
-		return -ENODEV;
-
-	ret = psci_dt_cpu_init_idle(cpu_node, cpu);
-
-	of_node_put(cpu_node);
-
-	return ret;
-}
-
-static int psci_suspend_finisher(unsigned long index)
+static int psci_suspend_finisher(unsigned long state)
 {
-	u32 *state = __this_cpu_read(psci_power_state);
+	u32 power_state = state;
 
-	return psci_ops.cpu_suspend(state[index - 1],
-				    __pa_symbol(cpu_resume));
+	return psci_ops.cpu_suspend(power_state, __pa_symbol(cpu_resume));
 }
 
-int psci_cpu_suspend_enter(unsigned long index)
+int psci_cpu_suspend_enter(u32 state)
 {
 	int ret;
-	u32 *state = __this_cpu_read(psci_power_state);
-	/*
-	 * idle state index 0 corresponds to wfi, should never be called
-	 * from the cpu_suspend operations
-	 */
-	if (WARN_ON_ONCE(!index))
-		return -EINVAL;
 
-	if (!psci_power_state_loses_context(state[index - 1]))
-		ret = psci_ops.cpu_suspend(state[index - 1], 0);
+	if (!psci_power_state_loses_context(state))
+		ret = psci_ops.cpu_suspend(state, 0);
 	else
-		ret = cpu_suspend(index, psci_suspend_finisher);
+		ret = cpu_suspend(state, psci_suspend_finisher);
 
 	return ret;
 }
-
-/* ARM specific CPU idle operations */
-#ifdef CONFIG_ARM
-static const struct cpuidle_ops psci_cpuidle_ops __initconst = {
-	.suspend = psci_cpu_suspend_enter,
-	.init = psci_dt_cpu_init_idle,
-};
-
-CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
-#endif
 #endif
 
 static int psci_system_suspend(unsigned long unused)
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index f3659443f8c2..6a445397771c 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -228,8 +228,11 @@ out_free_cpus:
 
 static void dummy_callback(struct timer_list *unused) {}
 
-static int suspend_cpu(int index, bool broadcast)
+static int suspend_cpu(struct cpuidle_device *dev,
+		       struct cpuidle_driver *drv, int index)
 {
+	struct cpuidle_state *state = &drv->states[index];
+	bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
 	int ret;
 
 	arch_cpu_idle_enter();
@@ -254,11 +257,7 @@ static int suspend_cpu(int index, bool broadcast)
 		}
 	}
 
-	/*
-	 * Replicate the common ARM cpuidle enter function
-	 * (arm_enter_idle_state).
-	 */
-	ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);
+	ret = state->enter(dev, drv, index);
 
 	if (broadcast)
 		tick_broadcast_exit();
@@ -301,9 +300,8 @@ static int suspend_test_thread(void *arg)
 	 * doesn't use PSCI).
	 */
 	for (index = 1; index < drv->state_count; ++index) {
-		struct cpuidle_state *state = &drv->states[index];
-		bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
 		int ret;
+		struct cpuidle_state *state = &drv->states[index];
 
 		/*
 		 * Set the timer to wake this CPU up in some time (which
@@ -318,7 +316,7 @@ static int suspend_test_thread(void *arg)
 		/* IRQs must be disabled during suspend operations. */
 		local_irq_disable();
 
-		ret = suspend_cpu(index, broadcast);
+		ret = suspend_cpu(dev, drv, index);
 
 		/*
 		 * We have woken up. Re-enable IRQs to handle any
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 474f304ec109..cdd4f73b4869 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -40,6 +40,7 @@ config ALTERA_PR_IP_CORE_PLAT
 config FPGA_MGR_ALTERA_PS_SPI
 	tristate "Altera FPGA Passive Serial over SPI"
 	depends on SPI
+	select BITREVERSE
 	help
 	  FPGA manager driver support for Altera Arria/Cyclone/Stratix
 	  using the passive serial interface over SPI.
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3ee99d070608..f497003f119c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -956,9 +956,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 	}
 
 	if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
-		irqflags |= IRQF_TRIGGER_RISING;
+		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
 	if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
-		irqflags |= IRQF_TRIGGER_FALLING;
+		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
 	irqflags |= IRQF_ONESHOT;
 
 	INIT_KFIFO(le->events);
@@ -1392,12 +1394,17 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 	for (i = 0; i < chip->ngpio; i++) {
 		struct gpio_desc *desc = &gdev->descs[i];
 
-		if (chip->get_direction && gpiochip_line_is_valid(chip, i))
-			desc->flags = !chip->get_direction(chip, i) ?
-					(1 << FLAG_IS_OUT) : 0;
-		else
-			desc->flags = !chip->direction_input ?
-					(1 << FLAG_IS_OUT) : 0;
+		if (chip->get_direction && gpiochip_line_is_valid(chip, i)) {
+			if (!chip->get_direction(chip, i))
+				set_bit(FLAG_IS_OUT, &desc->flags);
+			else
+				clear_bit(FLAG_IS_OUT, &desc->flags);
+		} else {
+			if (!chip->direction_input)
+				set_bit(FLAG_IS_OUT, &desc->flags);
+			else
+				clear_bit(FLAG_IS_OUT, &desc->flags);
+		}
 	}
 
 	acpi_gpiochip_add(chip);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1d80222587ad..3c88420e3497 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -394,7 +394,7 @@ config DRM_R128
 config DRM_I810
 	tristate "Intel I810"
 	# !PREEMPT because of missing ioctl locking
-	depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
+	depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
 	help
 	  Choose this option if you have an Intel I810 graphics card.
 	  If M is selected, the module will be called i810. AGP support is required
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1d3ee9c42f7e..6a5c96e519b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1140,7 +1140,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 			adev->asic_type != CHIP_FIJI &&
 			adev->asic_type != CHIP_POLARIS10 &&
 			adev->asic_type != CHIP_POLARIS11 &&
-			adev->asic_type != CHIP_POLARIS12) ?
+			adev->asic_type != CHIP_POLARIS12 &&
+			adev->asic_type != CHIP_VEGAM) ?
 			VI_BO_SIZE_ALIGN : 1;
 
 	mapping_flags = AMDGPU_VM_PAGE_READABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e069de8b54e6..4e4094f842e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1044,29 +1044,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
 			return r;
 		}
 
-		fence = amdgpu_ctx_get_fence(ctx, entity,
-					     deps[i].handle);
+		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
+		amdgpu_ctx_put(ctx);
+
+		if (IS_ERR(fence))
+			return PTR_ERR(fence);
+		else if (!fence)
+			continue;
 
 		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
-			struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+			struct drm_sched_fence *s_fence;
 			struct dma_fence *old = fence;
 
+			s_fence = to_drm_sched_fence(fence);
 			fence = dma_fence_get(&s_fence->scheduled);
 			dma_fence_put(old);
 		}
 
-		if (IS_ERR(fence)) {
-			r = PTR_ERR(fence);
-			amdgpu_ctx_put(ctx);
+		r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
+		dma_fence_put(fence);
+		if (r)
 			return r;
-		} else if (fence) {
-			r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
-					      true);
-			dma_fence_put(fence);
-			amdgpu_ctx_put(ctx);
-			if (r)
-				return r;
-		}
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 6d54decef7f8..5652cc72ed3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
 	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
 
-	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+	data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f2e8b4238efd..5376328d3fd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -148,7 +148,7 @@ struct amdgpu_mgpu_info mgpu_info = {
 	.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
 };
 int amdgpu_ras_enable = -1;
-uint amdgpu_ras_mask = 0xffffffff;
+uint amdgpu_ras_mask = 0xfffffffb;
 
 /**
  * DOC: vramlimit (int)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 8b7efd0a7028..2b546567853b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -159,12 +159,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_pm_state_type pm;
 
-	if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
-		pm = amdgpu_smu_get_current_power_state(adev);
-	else if (adev->powerplay.pp_funcs->get_current_power_state)
+	if (is_support_sw_smu(adev)) {
+		if (adev->smu.ppt_funcs->get_current_power_state)
+			pm = amdgpu_smu_get_current_power_state(adev);
+		else
+			pm = adev->pm.dpm.user_state;
+	} else if (adev->powerplay.pp_funcs->get_current_power_state) {
 		pm = amdgpu_dpm_get_current_power_state(adev);
-	else
+	} else {
 		pm = adev->pm.dpm.user_state;
+	}
 
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			(pm == POWER_STATE_TYPE_BATTERY) ?
"battery" : @@ -191,7 +195,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, goto fail; } - if (adev->powerplay.pp_funcs->dispatch_tasks) { + if (is_support_sw_smu(adev)) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.user_state = state; + mutex_unlock(&adev->pm.mutex); + } else if (adev->powerplay.pp_funcs->dispatch_tasks) { amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); } else { mutex_lock(&adev->pm.mutex); @@ -1734,7 +1742,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, return -EINVAL; if (is_support_sw_smu(adev)) { - err = smu_get_current_rpm(&adev->smu, &speed); + err = smu_get_fan_speed_rpm(&adev->smu, &speed); if (err) return err; } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { @@ -1794,7 +1802,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, return -EINVAL; if (is_support_sw_smu(adev)) { - err = smu_get_current_rpm(&adev->smu, &rpm); + err = smu_get_fan_speed_rpm(&adev->smu, &rpm); if (err) return err; } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { @@ -3067,28 +3075,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); - /* UVD clocks */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { - if (!value) { - seq_printf(m, "UVD: Disabled\n"); - } else { - seq_printf(m, "UVD: Enabled\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (DCLK)\n", value/100); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + if (adev->asic_type > CHIP_VEGA20) { + /* VCN clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "VCN: Disabled\n"); + } else { + seq_printf(m, "VCN: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (DCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + } } - } - seq_printf(m, "\n"); + seq_printf(m, "\n"); + } else { + /* UVD clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "UVD: Disabled\n"); + } else { + seq_printf(m, "UVD: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (DCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + } + } + seq_printf(m, "\n"); - /* VCE clocks */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { - if (!value) { - seq_printf(m, "VCE: Disabled\n"); - } else { - seq_printf(m, "VCE: Enabled\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); + /* VCE clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "VCE: Disabled\n"); + } else { + seq_printf(m, "VCE: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) 
+ seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); + } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 1a4412e47810..fac7aa2c244f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -136,11 +136,6 @@ static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, static int amdgpu_ras_release_vram(struct amdgpu_device *adev, struct amdgpu_bo **bo_ptr); -static void amdgpu_ras_self_test(struct amdgpu_device *adev) -{ - /* TODO */ -} - static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -689,6 +684,12 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, if (!obj) return -EINVAL; + if (block_info.block_id != TA_RAS_BLOCK__UMC) { + DRM_INFO("%s error injection is not supported yet\n", + ras_block_str(info->head.block)); + return -EINVAL; + } + ret = psp_ras_trigger_error(&adev->psp, &block_info); if (ret) DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n", @@ -1557,6 +1558,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev) amdgpu_ras_check_supported(adev, &con->hw_supported, &con->supported); + if (!con->hw_supported) { + amdgpu_ras_set_context(adev, NULL); + kfree(con); + return 0; + } + con->features = 0; INIT_LIST_HEAD(&con->head); /* Might need get this flag from vbios. */ @@ -1570,8 +1577,6 @@ int amdgpu_ras_init(struct amdgpu_device *adev) if (amdgpu_ras_fs_init(adev)) goto fs_out; - amdgpu_ras_self_test(adev); - DRM_INFO("RAS INFO: ras initialized successfully, " "hardware ability[%x] ras_mask[%x]\n", con->hw_supported, con->supported); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 1675d5837c3c..32773b7523d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1441,6 +1441,15 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev) } nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + access. These should be enabled by FW for target VMIDs. */ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0); + } } static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) @@ -4611,6 +4620,7 @@ gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE, 0); WREG32(cp_int_cntl_reg, cp_int_cntl); + break; case AMDGPU_IRQ_STATE_ENABLE: cp_int_cntl = RREG32(cp_int_cntl_reg); cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 0db9f488da7e..21187275dfd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1879,6 +1879,15 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev) } cik_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + access. These should be enabled by FW for target VMIDs. 
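This same GDS/GWS/OA zeroing is repeated for gfx8 and gfx9 below and mirrors the gfx10 hunk above; only the per-generation register accessors differ.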
*/ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); + WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); + WREG32(amdgpu_gds_reg_offset[i].gws, 0); + WREG32(amdgpu_gds_reg_offset[i].oa, 0); + } } static void gfx_v7_0_config_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5f401b41ef7c..751567f78567 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3706,6 +3706,15 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev) } vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + access. These should be enabled by FW for target VMIDs. */ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); + WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); + WREG32(amdgpu_gds_reg_offset[i].gws, 0); + WREG32(amdgpu_gds_reg_offset[i].oa, 0); + } } static void gfx_v8_0_config_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index f4c4eea62526..1cf639a51178 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1918,6 +1918,15 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) } soc15_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + access. These should be enabled by FW for target VMIDs. */ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0); + } } static void gfx_v9_0_constants_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 988c0adaca91..1cfc2620b2dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -372,11 +372,8 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.gpu_addr)); offset = size; - /* No signed header for now from firmware WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3); - */ - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0); } WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 792371442195..4e3fc284f6ac 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -668,6 +668,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, case CHIP_RAVEN: pcache_info = raven_cache_info; num_of_cache_types = ARRAY_SIZE(raven_cache_info); + break; case CHIP_NAVI10: pcache_info = navi10_cache_info; num_of_cache_types = ARRAY_SIZE(navi10_cache_info); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 4f8a6ffc5775..9cd3eb2d90bd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -429,7 +429,6 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, switch (type) { case KFD_MQD_TYPE_CP: - pr_debug("%s@%i\n", __func__, 
__LINE__); case KFD_MQD_TYPE_COMPUTE: pr_debug("%s@%i\n", __func__, __LINE__); mqd->allocate_mqd = allocate_mqd; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c index c1a92c16535c..5cc3acccda2a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c @@ -262,12 +262,12 @@ void dce110_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) { + dce_clk_mgr_construct(ctx, clk_mgr); + memcpy(clk_mgr->max_clks_by_state, dce110_max_clks_by_state, sizeof(dce110_max_clks_by_state)); - dce_clk_mgr_construct(ctx, clk_mgr); - clk_mgr->regs = &disp_clk_regs; clk_mgr->clk_mgr_shift = &disp_clk_shift; clk_mgr->clk_mgr_mask = &disp_clk_mask; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c index 778392c73187..7c746ef1e32e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c @@ -226,12 +226,12 @@ void dce112_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) { + dce_clk_mgr_construct(ctx, clk_mgr); + memcpy(clk_mgr->max_clks_by_state, dce112_max_clks_by_state, sizeof(dce112_max_clks_by_state)); - dce_clk_mgr_construct(ctx, clk_mgr); - clk_mgr->regs = &disp_clk_regs; clk_mgr->clk_mgr_shift = &disp_clk_shift; clk_mgr->clk_mgr_mask = &disp_clk_mask; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c index 906310c3e2eb..5399b8cf6b75 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c @@ -127,12 +127,12 @@ static struct clk_mgr_funcs dce120_funcs = { void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) { + dce_clk_mgr_construct(ctx, clk_mgr); + memcpy(clk_mgr->max_clks_by_state, dce120_max_clks_by_state, sizeof(dce120_max_clks_by_state)); - dce_clk_mgr_construct(ctx, clk_mgr); - clk_mgr->base.dprefclk_khz = 600000; clk_mgr->base.funcs = &dce120_funcs; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 08a774fc7b67..50bfb5921de0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -301,6 +301,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, void dcn2_init_clocks(struct clk_mgr *clk_mgr) { memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + // Assumption is that boot state always supports pstate + clk_mgr->clks.p_state_change_support = true; } void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base) @@ -331,6 +333,7 @@ void dcn20_clk_mgr_construct( struct dccg *dccg) { clk_mgr->base.ctx = ctx; + clk_mgr->pp_smu = pp_smu; clk_mgr->base.funcs = &dcn2_funcs; clk_mgr->regs = &clk_mgr_regs; clk_mgr->clk_mgr_shift = &clk_mgr_shift; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 4ef4dc63e221..fa20201eef3a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -502,8 +502,10 @@ void dc_stream_set_static_screen_events(struct dc *dc, static void destruct(struct dc *dc) { - 
dc_release_state(dc->current_state); - dc->current_state = NULL; + if (dc->current_state) { + dc_release_state(dc->current_state); + dc->current_state = NULL; + } destroy_links(dc); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 8dbf759eba45..355b4ba12796 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -532,6 +532,7 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) uint32_t read_dpcd_retry_cnt = 10; enum dc_status status = DC_ERROR_UNEXPECTED; int i; + union max_down_spread max_down_spread = { {0} }; // Read DPCD 00101h to find out the number of lanes currently set for (i = 0; i < read_dpcd_retry_cnt; i++) { @@ -553,8 +554,6 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) msleep(8); } - ASSERT(status == DC_OK); - // Read DPCD 00100h to find if standard link rates are set core_link_read_dpcd(link, DP_LINK_BW_SET, &link_bw_set, sizeof(link_bw_set)); @@ -576,6 +575,12 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) link->cur_link_settings.link_rate = link_bw_set; link->cur_link_settings.use_link_rate_set = false; } + // Read DPCD 00003h to find the max down spread. + core_link_read_dpcd(link, DP_MAX_DOWNSPREAD, + &max_down_spread.raw, sizeof(max_down_spread)); + link->cur_link_settings.link_spread = + max_down_spread.bits.MAX_DOWN_SPREAD ? + LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; } static bool detect_dp( @@ -717,13 +722,6 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) return false; } - if (link->connector_signal == SIGNAL_TYPE_EDP) { - /* On detect, we want to make sure current link settings are - * up to date, especially if link was powered on by GOP. - */ - read_edp_current_link_settings_on_detect(link); - } - prev_sink = link->local_sink; if (prev_sink != NULL) { dc_sink_retain(prev_sink); @@ -765,6 +763,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) } case SIGNAL_TYPE_EDP: { + read_edp_current_link_settings_on_detect(link); detect_edp_sink_caps(link); sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; @@ -2329,7 +2328,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link, if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) { if (core_dc->current_state->res_ctx. 
pipe_ctx[i].stream->link - == link) + == link) { /* DMCU -1 for all controller id values, * therefore +1 here */ @@ -2337,6 +2336,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link, core_dc->current_state-> res_ctx.pipe_ctx[i].stream_res.tg->inst + 1; + + /* Disable brightness ramping when the display is blanked + * as it can hang the DMCU + */ + if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL) + frame_ramp = 0; + } } } abm->funcs->set_backlight_level_pwm( @@ -2984,8 +2990,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc, /* Retrain with preferred link settings only relevant for * DP signal type + * Check for non-DP signal or if passive dongle present */ - if (!dc_is_dp_signal(link->connector_signal)) + if (!dc_is_dp_signal(link->connector_signal) || + link->dongle_max_pix_clk > 0) return; for (i = 0; i < MAX_PIPES; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 056be4c34a98..2c7aaed907b9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2230,18 +2230,25 @@ static void get_active_converter_info( link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type); + link->dpcd_caps.is_branch_dev = false; return; } /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */ - link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; + if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) { + link->dpcd_caps.is_branch_dev = false; + } else { + link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; + } switch (ds_port.fields.PORT_TYPE) { case DOWNSTREAM_VGA: link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; break; - case DOWNSTREAM_DVI_HDMI: - /* At this point we don't know is it DVI or HDMI, + case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS: + /* At this point we don't know whether it is DVI, HDMI, or DP++, * assume DVI.*/ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER; break; @@ -2258,6 +2265,10 @@ static void get_active_converter_info( det_caps, sizeof(det_caps)); switch (port_caps->bits.DWN_STRM_PORTX_TYPE) { + /* Handle DP case as DONGLE_NONE */ + case DOWN_STREAM_DETAILED_DP: + link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; + break; case DOWN_STREAM_DETAILED_VGA: link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; @@ -2267,6 +2278,8 @@ static void get_active_converter_info( DISPLAY_DONGLE_DP_DVI_CONVERTER; break; case DOWN_STREAM_DETAILED_HDMI: + case DOWN_STREAM_DETAILED_DP_PLUS_PLUS: + /* Handle DP++ active converter case; process DP++ as HDMI per the DP 1.4 spec */ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_HDMI_CONVERTER; @@ -2282,14 +2295,18 @@ static void get_active_converter_info( link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter = hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = - hdmi_caps.bits.YCrCr422_PASS_THROUGH; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = - hdmi_caps.bits.YCrCr420_PASS_THROUGH; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = - hdmi_caps.bits.YCrCr422_CONVERSION; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = - hdmi_caps.bits.YCrCr420_CONVERSION; + /* YCBCR capability only for the HDMI case */ + if (port_caps->bits.DWN_STRM_PORTX_TYPE + == DOWN_STREAM_DETAILED_HDMI) { + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = + hdmi_caps.bits.YCrCr422_PASS_THROUGH; + 
link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = + hdmi_caps.bits.YCrCr420_PASS_THROUGH; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = + hdmi_caps.bits.YCrCr422_CONVERSION; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = + hdmi_caps.bits.YCrCr420_CONVERSION; + } link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc = translate_dpcd_max_bpc( diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 51a78283a86d..2ceaab4fb5de 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -258,7 +258,7 @@ bool resource_construct( * PORT_CONNECTIVITY == 1 (as instructed by HW team). */ update_num_audio(&straps, &num_audio, &pool->audio_support); - for (i = 0; i < pool->pipe_count && i < num_audio; i++) { + for (i = 0; i < caps->num_audio; i++) { struct audio *aud = create_funcs->create_audio(ctx, i); if (aud == NULL) { @@ -1669,6 +1669,12 @@ static struct audio *find_first_free_audio( return pool->audios[i]; } } + + /* use engine id to find free audio */ + if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) { + return pool->audios[id]; + } + /*not found the matching one, first come first serve*/ for (i = 0; i < pool->audio_count; i++) { if (res_ctx->is_audio_acquired[i] == false) { @@ -1833,6 +1839,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing) pix_clk /= 2; if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) { switch (timing->display_color_depth) { + case COLOR_DEPTH_666: case COLOR_DEPTH_888: normalized_pix_clk = pix_clk; break; @@ -1979,7 +1986,7 @@ enum dc_status resource_map_pool_resources( /* TODO: Add check if ASIC support and EDID audio */ if (!stream->converter_disable_audio && dc_is_audio_capable_signal(pipe_ctx->stream->signal) && - stream->audio_info.mode_count) { + stream->audio_info.mode_count && stream->audio_info.flags.all) { pipe_ctx->stream_res.audio = find_first_free_audio( &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index af7f8be230f7..352862370390 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -612,7 +612,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, pipe_ctx->stream->dmdata_address = attr->address; - if (pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) { + if (pipe_ctx->stream_res.stream_enc && + pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) { if (pipe_ctx->stream->dmdata_address.quad_part != 0) { /* if using dynamic meta, don't set up generic infopackets */ pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index f8903bcabe49..58bd131d5b48 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -239,6 +239,10 @@ static void dmcu_set_backlight_level( s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT); REG_WRITE(BIOS_SCRATCH_2, s2); + + /* waitDMCUReadyForCmd */ + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, + 0, 1, 80000); } static void dce_abm_init(struct abm *abm) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 858a58856ebd..fafb4b470140 
100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -965,11 +965,17 @@ void hwss_edp_backlight_control( void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) { /* notify audio driver for audio modes of monitor */ - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *core_dc; struct pp_smu_funcs *pp_smu = NULL; - struct clk_mgr *clk_mgr = core_dc->clk_mgr; + struct clk_mgr *clk_mgr; unsigned int i, num_audio = 1; + if (!pipe_ctx->stream) + return; + + core_dc = pipe_ctx->stream->ctx->dc; + clk_mgr = core_dc->clk_mgr; + if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true) return; @@ -999,9 +1005,15 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) { - struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc *dc; struct pp_smu_funcs *pp_smu = NULL; - struct clk_mgr *clk_mgr = dc->clk_mgr; + struct clk_mgr *clk_mgr; + + if (!pipe_ctx || !pipe_ctx->stream) + return; + + dc = pipe_ctx->stream->ctx->dc; + clk_mgr = dc->clk_mgr; if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false) return; @@ -1009,6 +1021,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.stream_enc, true); if (pipe_ctx->stream_res.audio) { + pipe_ctx->stream_res.audio->enabled = false; + if (dc->res_pool->pp_smu) pp_smu = dc->res_pool->pp_smu; @@ -1039,8 +1053,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) /* dal_audio_disable_azalia_audio_jack_presence(stream->audio, * stream->stream_engine_id); */ - if (pipe_ctx->stream_res.audio) - pipe_ctx->stream_res.audio->enabled = false; } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index e50a696fcb5d..2118ea21d7e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1195,16 +1195,7 @@ static void dcn10_init_hw(struct dc *dc) * everything down. 
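Note that the open-coded per-pipe power-down loop that used to follow here is dropped in favor of the generic dc->hwss.init_pipes() hook, which is expected to perform the same HUBP/DPP teardown plus the DEGVIDCN10-253 workaround.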
*/ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct hubp *hubp = dc->res_pool->hubps[i]; - struct dpp *dpp = dc->res_pool->dpps[i]; - - hubp->funcs->hubp_init(hubp); - dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst; - plane_atomic_power_down(dc, dpp, hubp); - } - - apply_DEGVIDCN10_253_wa(dc); + dc->hwss.init_pipes(dc, dc->current_state); } for (i = 0; i < dc->res_pool->audio_count; i++) { @@ -1375,10 +1366,6 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, return result; } - - - - static bool dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) @@ -2516,6 +2503,12 @@ static void dcn10_apply_ctx_for_surface( if (removed_pipe[i]) dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (removed_pipe[i]) { + dc->hwss.optimize_bandwidth(dc, context); + break; + } + if (dc->hwseq->wa.DEGVIDCN10_254) hubbub1_wm_change_req_wa(dc->res_pool->hubbub); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 1a20461c2937..a12530a3ab9c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -508,7 +508,7 @@ static const struct resource_caps rv2_res_cap = { .num_audio = 3, .num_stream_encoder = 3, .num_pll = 3, - .num_ddc = 3, + .num_ddc = 4, }; static const struct dc_plane_cap plane_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c index 51a3dfe97f0e..31aa6ee5cd5b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c @@ -102,14 +102,19 @@ void dccg2_init(struct dccg *dccg) switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) { case 6: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1); + /* Fall through */ case 5: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1); + /* Fall through */ case 4: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1); + /* Fall through */ case 3: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1); + /* Fall through */ case 2: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1); + /* Fall through */ case 1: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1); break; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index ece6e136437b..6e2dbd03f9bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -337,6 +337,7 @@ static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigne break; default: ASSERT(false); + block_size = page_table_block_size; break; } @@ -366,25 +367,24 @@ int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub, struct dcn_vmid_page_table_config phys_config; REG_SET(DCN_VM_FB_LOCATION_BASE, 0, - FB_BASE, pa_config->system_aperture.fb_base); + FB_BASE, pa_config->system_aperture.fb_base >> 24); REG_SET(DCN_VM_FB_LOCATION_TOP, 0, - FB_TOP, pa_config->system_aperture.fb_top); + FB_TOP, pa_config->system_aperture.fb_top >> 24); REG_SET(DCN_VM_FB_OFFSET, 0, - FB_OFFSET, pa_config->system_aperture.fb_offset); + FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); REG_SET(DCN_VM_AGP_BOT, 0, - AGP_BOT, pa_config->system_aperture.agp_bot); + AGP_BOT, 
pa_config->system_aperture.agp_bot >> 24); REG_SET(DCN_VM_AGP_TOP, 0, - AGP_TOP, pa_config->system_aperture.agp_top); + AGP_TOP, pa_config->system_aperture.agp_top >> 24); REG_SET(DCN_VM_AGP_BASE, 0, - AGP_BASE, pa_config->system_aperture.agp_base); + AGP_BASE, pa_config->system_aperture.agp_base >> 24); if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { - phys_config.depth = 1; - phys_config.block_size = 4096; phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; - + phys_config.depth = 0; + phys_config.block_size = 0; // Init VMID 0 based on PA config dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 0b84a322b8a2..d810c8940129 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1153,8 +1153,8 @@ void dcn20_enable_plane( apt.sys_default.quad_part = 0; - apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.start_addr; - apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.end_addr; + apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr; + apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr; // Program system aperture settings pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt); @@ -1242,6 +1242,8 @@ void dcn20_pipe_control_lock_global( CRTC_STATE_VACTIVE); pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); + pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, + CRTC_STATE_VACTIVE); pipe->stream_res.tg->funcs->lock_doublebuffer_disable( pipe->stream_res.tg); } @@ -1263,6 +1265,17 @@ void dcn20_pipe_control_lock( if (pipe->plane_state != NULL) flip_immediate = pipe->plane_state->flip_immediate; + if (flip_immediate && lock) { + while (pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp)) { + udelay(1); + } + + if (pipe->bottom_pipe != NULL) + while (pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp)) { + udelay(1); + } + } + /* In flip immediate and pipe splitting case, we need to use GSL * for synchronization. Only do setup on locking and on flip type change. */ @@ -1740,8 +1753,11 @@ static void dcn20_reset_back_end_for_pipe( else if (pipe_ctx->stream_res.audio) { dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE); } - } +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + else if (pipe_ctx->stream_res.dsc) + dp_set_dsc_enable(pipe_ctx, false); +#endif /* by upper caller loop, parent pipe: pipe0, will be reset last. * back end share by all pipes and will be disable only when disable diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 26a66ccf6e72..1ae973962d53 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -535,7 +535,7 @@ void dcn20_timing_generator_init(struct optc *optc1) optc1->min_h_blank = 32; optc1->min_v_blank = 3; optc1->min_v_blank_interlace = 5; - optc1->min_h_sync_width = 8; + optc1->min_h_sync_width = 4; // The minimum HSYNC of 8 pixels was asked for by HW in the first place for no actual reason. 
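(Presumably this minimum acts only as a timing-validation bound rather than a value programmed into the OTG itself.)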
The Oculus Rift S will not light up with 8, since its hsync width is 6; changing it to 4 fixes that issue. optc1->min_v_sync_width = 1; optc1->comb_opp_id = 0xf; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index d200bc3cec71..b949e202d6cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2643,6 +2643,10 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_ if (dc->bb_overrides.min_dcfclk_mhz > 0) min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; + else + // Accounting for the SOC/DCF relationship, we can go as high as + // 506 MHz in Vmin. We need to code 507, since the SMU will round down to 506. + min_dcfclk = 507; for (i = 0; i < num_states; i++) { int min_fclk_required_by_uclk; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c index 27679ef6ebe8..96c263223315 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c @@ -23,6 +23,8 @@ * */ +#include <linux/delay.h> + #include "dcn20_vmid.h" #include "reg_helper.h" @@ -36,6 +38,38 @@ #define FN(reg_name, field_name) \ vmid->shifts->field_name, vmid->masks->field_name +static void dcn20_wait_for_vmid_ready(struct dcn20_vmid *vmid) +{ + /* According to the hardware spec, we need to poll for the lowest + * bit of PAGE_TABLE_BASE_ADDR_LO32 = 1 any time a GPUVM + * context is updated. We can't use REG_WAIT here since we + * don't have a separate field to wait on. + * + * TODO: Confirm timeout / poll interval with hardware team + */ + + int max_times = 10000; + int delay_us = 5; + int i; + + for (i = 0; i < max_times; ++i) { + uint32_t entry_lo32; + + REG_GET(PAGE_TABLE_BASE_ADDR_LO32, + VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, + &entry_lo32); + + if (entry_lo32 & 0x1) + return; + + udelay(delay_us); + } + + /* VM setup timed out */ + DC_LOG_WARNING("Timeout while waiting for GPUVM context update\n"); + ASSERT(0); +} + void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config) { REG_SET(PAGE_TABLE_START_ADDR_HI32, 0, @@ -54,6 +88,9 @@ void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_ REG_SET(PAGE_TABLE_BASE_ADDR_HI32, 0, VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32, (config->page_table_base_addr >> 32) & 0xFFFFFFFF); + /* Note: per the hardware spec, PAGE_TABLE_BASE_ADDR_LO32 must be programmed last in the sequence */ REG_SET(PAGE_TABLE_BASE_ADDR_LO32, 0, VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, config->page_table_base_addr & 0xFFFFFFFF); + + dcn20_wait_for_vmid_ready(vmid); } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c index 67089765780b..340ef4d41ebd 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c @@ -377,6 +377,12 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16; vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay; + /* As per DSC spec v1.2a recommendation: */ + if (vdsc_cfg->native_420) + vdsc_cfg->second_line_offset_adj = 512; + else + vdsc_cfg->second_line_offset_adj = 0; + return 0; } EXPORT_SYMBOL(drm_dsc_compute_rc_parameters); diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 
c89393c19232..a148ffde8b12 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -212,7 +212,7 @@ struct resource_pool { struct clock_source *clock_sources[MAX_CLOCK_SOURCES]; unsigned int clk_src_count; - struct audio *audios[MAX_PIPES]; + struct audio *audios[MAX_AUDIOS]; unsigned int audio_count; struct audio_support audio_support; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 959f5b654611..9502478c4a1b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -61,8 +61,8 @@ enum dcn_hubbub_page_table_depth { }; enum dcn_hubbub_page_table_block_size { - DCN_PAGE_TABLE_BLOCK_SIZE_4KB, - DCN_PAGE_TABLE_BLOCK_SIZE_64KB + DCN_PAGE_TABLE_BLOCK_SIZE_4KB = 0, + DCN_PAGE_TABLE_BLOCK_SIZE_64KB = 4, }; struct dcn_hubbub_phys_addr_config { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 8759ec03aede..f82365e2d03c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -34,6 +34,7 @@ * Data types shared between different Virtual HW blocks ******************************************************************************/ +#define MAX_AUDIOS 7 #define MAX_PIPES 6 #if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define MAX_DWB_PIPES 1 diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index 1c66166d0a94..2c90d1b46c8b 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -43,7 +43,7 @@ enum dpcd_revision { enum dpcd_downstream_port_type { DOWNSTREAM_DP = 0, DOWNSTREAM_VGA, - DOWNSTREAM_DVI_HDMI, + DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS, /* DVI, HDMI, DP++ */ DOWNSTREAM_NONDDC /* has no EDID (TV,CV) */ }; diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 9f661bf96ed0..5b1ebb7f995a 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -123,6 +123,7 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, AMDGPU_PP_SENSOR_MIN_FAN_RPM, AMDGPU_PP_SENSOR_MAX_FAN_RPM, + AMDGPU_PP_SENSOR_VCN_POWER_STATE, }; enum amd_pp_task { diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index f1565c448de5..0685a3388e38 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -137,12 +137,37 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, { int ret = 0, clk_id = 0; uint32_t param = 0; + uint32_t clock_limit; if (!min && !max) return -EINVAL; - if (!smu_clk_dpm_is_enabled(smu, clk_type)) + if (!smu_clk_dpm_is_enabled(smu, clk_type)) { + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + clock_limit = smu->smu_table.boot_values.uclk; + break; + case SMU_GFXCLK: + case SMU_SCLK: + clock_limit = smu->smu_table.boot_values.gfxclk; + break; + case SMU_SOCCLK: + clock_limit = smu->smu_table.boot_values.socclk; + break; + default: + clock_limit = 0; + break; + } + + /* clock in MHz units */ + if (min) + *min = clock_limit / 100; + if (max) + *max = clock_limit / 100; + return 0; + } mutex_lock(&smu->mutex); clk_id = smu_clk_get_index(smu, clk_type); @@ -281,7 +306,8 @@ int smu_get_power_num_states(struct smu_context *smu, 
/* not support power state */ memset(state_info, 0, sizeof(struct pp_states_info)); - state_info->nums = 0; + state_info->nums = 1; + state_info->states[0] = POWER_STATE_TYPE_DEFAULT; return 0; } @@ -312,6 +338,10 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_POWER_STATE: + *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0; + *size = 4; + break; default: ret = -EINVAL; break; @@ -698,6 +728,12 @@ static int smu_sw_init(void *handle) return ret; } + ret = smu_register_irq_handler(smu); + if (ret) { + pr_err("Failed to register smc irq handler!\n"); + return ret; + } + return 0; } @@ -707,6 +743,9 @@ static int smu_sw_fini(void *handle) struct smu_context *smu = &adev->smu; int ret; + kfree(smu->irq_source); + smu->irq_source = NULL; + ret = smu_smc_table_sw_fini(smu); if (ret) { pr_err("Failed to sw fini smc table!\n"); @@ -1063,10 +1102,6 @@ static int smu_hw_init(void *handle) if (ret) goto failed; - ret = smu_register_irq_handler(smu); - if (ret) - goto failed; - if (!smu->pm_enabled) adev->pm.dpm_enabled = false; else @@ -1096,9 +1131,6 @@ static int smu_hw_fini(void *handle) kfree(table_context->overdrive_table); table_context->overdrive_table = NULL; - kfree(smu->irq_source); - smu->irq_source = NULL; - ret = smu_fini_fb_allocations(smu); if (ret) return ret; @@ -1349,13 +1381,49 @@ static int smu_enable_umd_pstate(void *handle, return 0; } +static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) +{ + int ret = 0; + uint32_t sclk_mask, mclk_mask, soc_mask; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = smu_force_dpm_limit_value(smu, true); + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = smu_force_dpm_limit_value(smu, false); + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + ret = smu_unforce_dpm_levels(smu); + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + ret = smu_get_profiling_clk_mask(smu, level, + &sclk_mask, + &mclk_mask, + &soc_mask); + if (ret) + return ret; + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); + break; + case AMD_DPM_FORCED_LEVEL_MANUAL: + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: + default: + break; + } + return ret; +} + int smu_adjust_power_state_dynamic(struct smu_context *smu, enum amd_dpm_forced_level level, bool skip_display_settings) { int ret = 0; int index = 0; - uint32_t sclk_mask, mclk_mask, soc_mask; long workload; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); @@ -1386,39 +1454,10 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, } if (smu_dpm_ctx->dpm_level != level) { - switch (level) { - case AMD_DPM_FORCED_LEVEL_HIGH: - ret = smu_force_dpm_limit_value(smu, true); - break; - case AMD_DPM_FORCED_LEVEL_LOW: - ret = smu_force_dpm_limit_value(smu, false); - break; - - case AMD_DPM_FORCED_LEVEL_AUTO: - case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: - ret = smu_unforce_dpm_levels(smu); - break; - - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: - case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: - ret = smu_get_profiling_clk_mask(smu, level, - &sclk_mask, - &mclk_mask, - &soc_mask); - if (ret) - 
return ret; - smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); - smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); - smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); - break; - - case AMD_DPM_FORCED_LEVEL_MANUAL: - case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: - default: - break; + ret = smu_asic_set_performance_level(smu, level); + if (ret) { + ret = smu_default_set_performance_level(smu, level); } - if (!ret) smu_dpm_ctx->dpm_level = level; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index e32ae9d3373c..18e780f566fa 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1111,6 +1111,7 @@ static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, void *value, int *size) { + struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); uint32_t sclk, mclk; int ret = 0; @@ -1132,6 +1133,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, case AMDGPU_PP_SENSOR_GPU_TEMP: *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr); break; + case AMDGPU_PP_SENSOR_VCN_POWER_STATE: + *(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1; + *size = 4; + break; default: ret = -EINVAL; break; @@ -1175,18 +1180,22 @@ static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate) static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate) { + struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); + if (bgate) { amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_GATE); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerDownVcn, 0); + smu10_data->vcn_power_gated = true; } else { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerUpVcn, 0); amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_UNGATE); + smu10_data->vcn_power_gated = false; } } diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 1af992fb0bde..208e6711d506 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -429,7 +429,6 @@ struct smu_table_context struct smu_table *tables; uint32_t table_count; struct smu_table memory_pool; - uint16_t software_shutdown_temp; uint8_t thermal_controller_type; uint16_t TDPODLimit; @@ -613,6 +612,7 @@ struct pptable_funcs { int (*tables_init)(struct smu_context *smu, struct smu_table *tables); int (*set_thermal_fan_table)(struct smu_context *smu); int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed); + int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed); int (*set_watermarks_table)(struct smu_context *smu, void *watermarks, struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); int (*get_current_clk_freq_by_table)(struct smu_context *smu, @@ -621,6 +621,7 @@ struct pptable_funcs { int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range); int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states); int (*set_default_od_settings)(struct smu_context *smu, bool initialize); + int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level); }; struct smu_funcs @@ -685,7 +686,6 @@ struct smu_funcs int (*set_watermarks_for_clock_ranges)(struct smu_context *smu, struct 
dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); int (*conv_power_profile_to_pplib_workload)(int power_profile); - int (*get_current_rpm)(struct smu_context *smu, uint32_t *speed); uint32_t (*get_fan_control_mode)(struct smu_context *smu); int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed); @@ -751,8 +751,6 @@ struct smu_funcs ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0) #define smu_set_default_od_settings(smu, initialize) \ ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) -#define smu_get_current_rpm(smu, speed) \ - ((smu)->funcs->get_current_rpm ? (smu)->funcs->get_current_rpm((smu), (speed)) : 0) #define smu_set_fan_speed_rpm(smu, speed) \ ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0) #define smu_send_smc_msg(smu, msg) \ @@ -841,6 +839,8 @@ struct smu_funcs ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0) #define smu_set_fan_speed_percent(smu, speed) \ ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0) +#define smu_get_fan_speed_rpm(smu, speed) \ + ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0) #define smu_msg_get_index(smu, msg) \ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL) @@ -918,6 +918,9 @@ struct smu_funcs ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0) #define smu_baco_reset(smu) \ ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0) +#define smu_asic_set_performance_level(smu, level) \ + ((smu)->ppt_funcs->set_performance_level? 
(smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL); + extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, uint16_t *size, uint8_t *frev, uint8_t *crev, diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 2dae0ae0829e..cc0a3b2256af 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -23,6 +23,7 @@ #include "pp_debug.h" #include <linux/firmware.h> +#include <linux/pci.h> #include "amdgpu.h" #include "amdgpu_smu.h" #include "atomfirmware.h" @@ -577,28 +578,20 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) { int ret = 0; - struct smu_power_context *smu_power = &smu->smu_power; - struct smu_power_gate *power_gate = &smu_power->power_gate; - if (enable && power_gate->uvd_gated) { - if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); - if (ret) - return ret; - } - power_gate->uvd_gated = false; + if (enable) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); + if (ret) + return ret; } else { - if (!enable && !power_gate->uvd_gated) { - if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); - if (ret) - return ret; - } - power_gate->uvd_gated = true; - } + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + if (ret) + return ret; } - return 0; + ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable); + + return ret; } static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, @@ -626,11 +619,26 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, return ret; } +static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + DpmDescriptor_t *dpm_desc = NULL; + uint32_t clk_index = 0; + + clk_index = smu_clk_get_index(smu, clk_type); + dpm_desc = &pptable->DpmDescriptor[clk_index]; + + /* 0 - Fine grained DPM, 1 - Discrete DPM */ + return dpm_desc->SnapToDiscrete == 0 ? true : false; +} + static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; + uint32_t freq_values[3] = {0}; + uint32_t mark_index = 0; switch (clk_type) { case SMU_GFXCLK: @@ -643,22 +651,42 @@ static int navi10_print_clk_levels(struct smu_context *smu, ret = smu_get_current_clk_freq(smu, clk_type, &cur_value); if (ret) return size; + /* 10KHz -> MHz */ cur_value = cur_value / 100; - size += sprintf(buf, "current clk: %uMhz\n", cur_value); - ret = smu_get_dpm_level_count(smu, clk_type, &count); if (ret) return size; - for (i = 0; i < count; i++) { - ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value); + if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) { + for (i = 0; i < count; i++) { + ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value); + if (ret) + return size; + + size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, + cur_value == value ? "*" : ""); + } + } else { + ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]); + if (ret) + return size; + ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]); if (ret) return size; - size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, - cur_value == value ? 
"*" : ""); + freq_values[1] = cur_value; + mark_index = cur_value == freq_values[0] ? 0 : + cur_value == freq_values[2] ? 2 : 1; + if (mark_index != 1) + freq_values[1] = (freq_values[0] + freq_values[2]) / 2; + + for (i = 0; i < 3; i++) { + size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i], + i == mark_index ? "*" : ""); + } + } break; default: @@ -919,12 +947,13 @@ static bool navi10_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value) +static int navi10_get_fan_speed_rpm(struct smu_context *smu, + uint32_t *speed) { SmuMetrics_t metrics; int ret = 0; - if (!value) + if (!speed) return -EINVAL; memset(&metrics, 0, sizeof(metrics)); @@ -934,7 +963,7 @@ static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value) if (ret) return ret; - *value = metrics.CurrFanSpeed; + *speed = metrics.CurrFanSpeed; return ret; } @@ -944,10 +973,10 @@ static int navi10_get_fan_speed_percent(struct smu_context *smu, { int ret = 0; uint32_t percent = 0; - uint16_t current_rpm; + uint32_t current_rpm; PPTable_t *pptable = smu->smu_table.driver_pptable; - ret = navi10_get_fan_speed(smu, ¤t_rpm); + ret = navi10_get_fan_speed_rpm(smu, ¤t_rpm); if (ret) return ret; @@ -1530,6 +1559,76 @@ static int navi10_set_ppfeature_status(struct smu_context *smu, return 0; } +static int navi10_set_peak_clock_by_device(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int ret = 0; + uint32_t sclk_freq = 0, uclk_freq = 0; + uint32_t uclk_level = 0; + + switch (adev->pdev->revision) { + case 0xf0: /* XTX */ + case 0xc0: + sclk_freq = NAVI10_PEAK_SCLK_XTX; + break; + case 0xf1: /* XT */ + case 0xc1: + sclk_freq = NAVI10_PEAK_SCLK_XT; + break; + default: /* XL */ + sclk_freq = NAVI10_PEAK_SCLK_XL; + break; + } + + ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level); + if (ret) + return ret; + ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq); + if (ret) + return ret; + + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + if (ret) + return ret; + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + if (ret) + return ret; + + return ret; +} + +static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + ret = navi10_set_peak_clock_by_device(smu); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int navi10_get_thermal_temperature_range(struct smu_context *smu, + struct smu_temperature_range *range) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table; + + if (!range || !powerplay_table) + return -EINVAL; + + /* The unit is temperature */ + range->min = 0; + range->max = powerplay_table->software_shutdown_temp; + + return 0; +} + static const struct pptable_funcs navi10_ppt_funcs = { .tables_init = navi10_tables_init, .alloc_dpm_context = navi10_allocate_dpm_context, @@ -1557,6 +1656,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .unforce_dpm_levels = navi10_unforce_dpm_levels, .is_dpm_running = navi10_is_dpm_running, .get_fan_speed_percent = navi10_get_fan_speed_percent, + .get_fan_speed_rpm = navi10_get_fan_speed_rpm, .get_power_profile_mode = navi10_get_power_profile_mode, .set_power_profile_mode = navi10_set_power_profile_mode, .get_profiling_clk_mask = 
navi10_get_profiling_clk_mask, @@ -1565,6 +1665,8 @@ static const struct pptable_funcs navi10_ppt_funcs = { .get_uclk_dpm_states = navi10_get_uclk_dpm_states, .get_ppfeature_status = navi10_get_ppfeature_status, .set_ppfeature_status = navi10_set_ppfeature_status, + .set_performance_level = navi10_set_performance_level, + .get_thermal_temperature_range = navi10_get_thermal_temperature_range, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h index 957288e22f47..620ff17c2fef 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h @@ -23,6 +23,10 @@ #ifndef __NAVI10_PPT_H__ #define __NAVI10_PPT_H__ +#define NAVI10_PEAK_SCLK_XTX (1830) +#define NAVI10_PEAK_SCLK_XT (1755) +#define NAVI10_PEAK_SCLK_XL (1625) + extern void navi10_set_ppt_funcs(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 95c7c4dae523..ac5b26228e75 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1124,10 +1124,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, struct smu_temperature_range *range) { struct amdgpu_device *adev = smu->adev; - int low = SMU_THERMAL_MINIMUM_ALERT_TEMP * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = SMU_THERMAL_MINIMUM_ALERT_TEMP; + int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; if (!range) @@ -1138,6 +1136,9 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, if (high > range->max) high = range->max; + low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min); + high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max); + if (low > high) return -EINVAL; @@ -1146,8 +1147,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff)); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); @@ -1186,7 +1187,10 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) if (!smu->pm_enabled) return ret; + ret = smu_get_thermal_temperature_range(smu, &range); + if (ret) + return ret; if (smu->smu_table.thermal_controller_type) { ret = smu_v11_0_set_thermal_range(smu, &range); @@ -1202,15 +1206,17 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) return ret; } - adev->pm.dpm.thermal.min_temp = range.min; - adev->pm.dpm.thermal.max_temp = range.max; - adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max; - adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min; - adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max; - adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max; - adev->pm.dpm.thermal.min_mem_temp = range.mem_min; - 
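/*
 * Editor's sketch (not part of the patch): after the smu_v11_0 change
 * above the alert thresholds are plain degrees C, clamped into the range
 * the platform reports, and only the low 8 bits reach the DIG_THERM_INT*
 * register fields. Minimal model of the effective clamping; the two
 * alert constants are assumed stand-ins for SMU_THERMAL_*_ALERT_TEMP:
 */
#include <stdio.h>

#define MIN_ALERT 0   /* assumed stand-in for SMU_THERMAL_MINIMUM_ALERT_TEMP */
#define MAX_ALERT 255 /* assumed stand-in for SMU_THERMAL_MAXIMUM_ALERT_TEMP */

static int set_thermal_range(int range_min, int range_max)
{
	int low = range_min > MIN_ALERT ? range_min : MIN_ALERT;
	int high = range_max < MAX_ALERT ? range_max : MAX_ALERT;

	if (low > high)
		return -1; /* -EINVAL in the kernel */

	printf("DIG_THERM_INTL=%d DIG_THERM_INTH=%d\n", low & 0xff, high & 0xff);
	return 0;
}

int main(void)
{
	return set_thermal_range(0, 100); /* e.g. a range from the pptable */
}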
adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max; - adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max; + adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; return ret; } @@ -1371,23 +1377,6 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) return ret; } -static int smu_v11_0_get_current_rpm(struct smu_context *smu, - uint32_t *current_rpm) -{ - int ret; - - ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm); - - if (ret) { - pr_err("Attempt to get current RPM from SMC Failed!\n"); - return ret; - } - - smu_read_smc_arg(smu, current_rpm); - - return 0; -} - static uint32_t smu_v11_0_get_fan_control_mode(struct smu_context *smu) { @@ -1773,7 +1762,6 @@ static const struct smu_funcs smu_v11_0_funcs = { .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges, - .get_current_rpm = smu_v11_0_get_current_rpm, .get_fan_control_mode = smu_v11_0_get_fan_control_mode, .set_fan_control_mode = smu_v11_0_set_fan_control_mode, .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index bb9bb09cfc7a..dd6fd1c8bf24 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -450,7 +450,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu) memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable, sizeof(PPTable_t)); - table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp; table_context->thermal_controller_type = powerplay_table->ucThermalControllerType; table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]); @@ -3015,6 +3014,23 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu) return ret; } +static int vega20_get_fan_speed_rpm(struct smu_context *smu, + uint32_t *speed) +{ + int ret; + + ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm); + + if (ret) { + pr_err("Attempt to get current RPM from SMC Failed!\n"); + return ret; + } + + smu_read_smc_arg(smu, speed); + + return 0; +} + static int vega20_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed) { @@ -3022,7 +3038,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu, uint32_t current_rpm = 0, percent = 
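/*
 * Editor's note (not part of the patch): thermal ranges are now carried
 * in whole degrees C and scaled only at the boundary where they are
 * published into adev->pm.dpm.thermal, as above. Assuming the factor is
 * 1000 (millidegrees; check SMU_TEMPERATURE_UNITS_PER_CENTIGRADES), the
 * conversion is simply:
 */
#include <stdio.h>

#define UNITS_PER_C 1000 /* assumed value of SMU_TEMPERATURE_UNITS_PER_CENTIGRADES */

int main(void)
{
	int software_shutdown_temp = 118; /* degrees C, as read from a pptable */

	printf("max_temp = %d driver units\n",
	       software_shutdown_temp * UNITS_PER_C); /* 118000 */
	return 0;
}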
0; PPTable_t *pptable = smu->smu_table.driver_pptable; - ret = smu_get_current_rpm(smu, ¤t_rpm); + ret = vega20_get_fan_speed_rpm(smu, ¤t_rpm); if (ret) return ret; @@ -3217,35 +3233,24 @@ static int vega20_set_watermarks_table(struct smu_context *smu, return 0; } -static const struct smu_temperature_range vega20_thermal_policy[] = -{ - {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, - { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, -}; - static int vega20_get_thermal_temperature_range(struct smu_context *smu, struct smu_temperature_range *range) { - + struct smu_table_context *table_context = &smu->smu_table; + ATOM_Vega20_POWERPLAYTABLE *powerplay_table = table_context->power_play_table; PPTable_t *pptable = smu->smu_table.driver_pptable; - if (!range) + if (!range || !powerplay_table) return -EINVAL; - memcpy(range, &vega20_thermal_policy[0], sizeof(struct smu_temperature_range)); - - range->max = pptable->TedgeLimit * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->hotspot_crit_max = pptable->ThotspotLimit * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->mem_crit_max = pptable->ThbmLimit * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)* - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + /* The unit is temperature */ + range->min = 0; + range->max = powerplay_table->usSoftwareShutdownTemp; + range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE); + range->hotspot_crit_max = pptable->ThotspotLimit; + range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT); + range->mem_crit_max = pptable->ThbmLimit; + range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM); return 0; @@ -3293,6 +3298,7 @@ static const struct pptable_funcs vega20_ppt_funcs = { .is_dpm_running = vega20_is_dpm_running, .set_thermal_fan_table = vega20_set_thermal_fan_table, .get_fan_speed_percent = vega20_get_fan_speed_percent, + .get_fan_speed_rpm = vega20_get_fan_speed_rpm, .set_watermarks_table = vega20_set_watermarks_table, .get_thermal_temperature_range = vega20_get_thermal_temperature_range }; diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index bc19dbd531ef..359030d5d818 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -191,6 +191,7 @@ int bochs_kms_init(struct bochs_device *bochs) bochs->dev->mode_config.fb_base = bochs->fb_base; bochs->dev->mode_config.preferred_depth = 24; bochs->dev->mode_config.prefer_shadow = 0; + bochs->dev->mode_config.prefer_shadow_fbdev = 1; bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; bochs->dev->mode_config.funcs = &bochs_mode_funcs; diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index ee777469293a..e4e22bbae2a7 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -48,6 +48,7 @@ config DRM_DUMB_VGA_DAC config DRM_LVDS_ENCODER tristate "Transparent parallel to LVDS encoder support" depends on OF + select DRM_KMS_HELPER select DRM_PANEL_BRIDGE help Support for transparent parallel to LVDS encoders that don't require @@ -116,9 +117,10 @@ config DRM_THINE_THC63LVD1024 config DRM_TOSHIBA_TC358764 tristate "TC358764 DSI/LVDS bridge" - depends on DRM && 
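/*
 * Editor's sketch (assumption-laden, not part of the patch): with
 * get_current_rpm removed from the shared smu_v11_0 layer, each ASIC
 * supplies its own get_fan_speed_rpm and derives a percentage from it.
 * The exact computation is elided above; assuming percent scales the
 * RPM against a maximum fan RPM taken from the pptable:
 */
#include <stdio.h>

static unsigned int fan_percent(unsigned int current_rpm, unsigned int max_rpm)
{
	unsigned int percent = current_rpm * 100 / max_rpm;

	return percent > 100 ? 100 : percent; /* clamp runaway readings */
}

int main(void)
{
	printf("%u%%\n", fan_percent(1650, 3300)); /* 50% */
	return 0;
}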
DRM_PANEL depends on OF select DRM_MIPI_DSI + select DRM_KMS_HELPER + select DRM_PANEL help Toshiba TC358764 DSI/LVDS bridge driver. diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index 410572f14257..e1dafb0cc5e2 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -254,7 +254,6 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u struct drm_device *dev = client->dev; struct drm_client_buffer *buffer; struct drm_gem_object *obj; - void *vaddr; int ret; buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); @@ -281,6 +280,36 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u buffer->gem = obj; + return buffer; + +err_delete: + drm_client_buffer_delete(buffer); + + return ERR_PTR(ret); +} + +/** + * drm_client_buffer_vmap - Map DRM client buffer into address space + * @buffer: DRM client buffer + * + * This function maps a client buffer into kernel address space. If the + * buffer is already mapped, it returns the mapping's address. + * + * Client buffer mappings are not ref'counted. Each call to + * drm_client_buffer_vmap() should be followed by a call to + * drm_client_buffer_vunmap(); or the client buffer should be mapped + * throughout its lifetime. + * + * Returns: + * The mapped memory's address + */ +void *drm_client_buffer_vmap(struct drm_client_buffer *buffer) +{ + void *vaddr; + + if (buffer->vaddr) + return buffer->vaddr; + /* * FIXME: The dependency on GEM here isn't required, we could * convert the driver handle to a dma-buf instead and use the @@ -289,21 +318,30 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u * fd_install step out of the driver backend hooks, to make that * final step optional for internal users. */ - vaddr = drm_gem_vmap(obj); - if (IS_ERR(vaddr)) { - ret = PTR_ERR(vaddr); - goto err_delete; - } + vaddr = drm_gem_vmap(buffer->gem); + if (IS_ERR(vaddr)) + return vaddr; buffer->vaddr = vaddr; - return buffer; - -err_delete: - drm_client_buffer_delete(buffer); + return vaddr; +} +EXPORT_SYMBOL(drm_client_buffer_vmap); - return ERR_PTR(ret); +/** + * drm_client_buffer_vunmap - Unmap DRM client buffer + * @buffer: DRM client buffer + * + * This function removes a client buffer's memory mapping. Calling this + * function is only required by clients that manage their buffer mappings + * by themselves. + */ +void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) +{ + drm_gem_vunmap(buffer->gem, buffer->vaddr); + buffer->vaddr = NULL; } +EXPORT_SYMBOL(drm_client_buffer_vunmap); static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer) { diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 56d36779d213..c8922b7cac09 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -859,7 +859,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation) * simple XOR between the two handle the addition nicely. 
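/*
 * Editor's sketch (standalone, not part of the patch): the new
 * drm_client_buffer_vmap() caches its mapping -- repeated calls return
 * the same address -- and drm_client_buffer_vunmap() drops it. The
 * mapping is not refcounted, so callers either pair the two calls or
 * keep the buffer mapped for its whole lifetime. Stubbed model of that
 * contract, with malloc/free standing in for drm_gem_vmap/vunmap:
 */
#include <stdio.h>
#include <stdlib.h>

struct buffer { void *vaddr; size_t size; };

static void *buffer_vmap(struct buffer *b)
{
	if (b->vaddr)
		return b->vaddr;          /* already mapped: reuse it */
	b->vaddr = malloc(b->size);       /* stands in for drm_gem_vmap() */
	return b->vaddr;
}

static void buffer_vunmap(struct buffer *b)
{
	free(b->vaddr);                   /* stands in for drm_gem_vunmap() */
	b->vaddr = NULL;
}

int main(void)
{
	struct buffer b = { .size = 4096 };
	void *a = buffer_vmap(&b);
	void *c = buffer_vmap(&b);        /* same pointer, no second map */

	printf("cached: %s\n", a == c ? "yes" : "no");
	buffer_vunmap(&b);
	return 0;
}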
*/ cmdline = &connector->cmdline_mode; - if (cmdline->specified) { + if (cmdline->specified && cmdline->rotation_reflection) { unsigned int cmdline_rest, panel_rest; unsigned int cmdline_rot, panel_rot; unsigned int sum_rot, sum_rest; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 1984e5c54d58..a7ba5b4902d6 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -403,6 +403,7 @@ static void drm_fb_helper_dirty_work(struct work_struct *work) struct drm_clip_rect *clip = &helper->dirty_clip; struct drm_clip_rect clip_copy; unsigned long flags; + void *vaddr; spin_lock_irqsave(&helper->dirty_lock, flags); clip_copy = *clip; @@ -412,10 +413,20 @@ static void drm_fb_helper_dirty_work(struct work_struct *work) /* call dirty callback only when it has been really touched */ if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) { + /* Generic fbdev uses a shadow buffer */ - if (helper->buffer) + if (helper->buffer) { + vaddr = drm_client_buffer_vmap(helper->buffer); + if (IS_ERR(vaddr)) + return; drm_fb_helper_dirty_blit_real(helper, &clip_copy); - helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); + } + if (helper->fb->funcs->dirty) + helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, + &clip_copy, 1); + + if (helper->buffer) + drm_client_buffer_vunmap(helper->buffer); } } @@ -604,6 +615,16 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper) } EXPORT_SYMBOL(drm_fb_helper_unlink_fbi); +static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper) +{ + struct drm_device *dev = fb_helper->dev; + struct drm_framebuffer *fb = fb_helper->fb; + + return dev->mode_config.prefer_shadow_fbdev || + dev->mode_config.prefer_shadow || + fb->funcs->dirty; +} + static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y, u32 width, u32 height) { @@ -611,7 +632,7 @@ static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y, struct drm_clip_rect *clip = &helper->dirty_clip; unsigned long flags; - if (!helper->fb->funcs->dirty) + if (!drm_fbdev_use_shadow_fb(helper)) return; spin_lock_irqsave(&helper->dirty_lock, flags); @@ -2178,6 +2199,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, struct drm_framebuffer *fb; struct fb_info *fbi; u32 format; + void *vaddr; DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n", sizes->surface_width, sizes->surface_height, @@ -2200,16 +2222,10 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fbi->fbops = &drm_fbdev_fb_ops; fbi->screen_size = fb->height * fb->pitches[0]; fbi->fix.smem_len = fbi->screen_size; - fbi->screen_buffer = buffer->vaddr; - /* Shamelessly leak the physical address to user-space */ -#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) - if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0) - fbi->fix.smem_start = - page_to_phys(virt_to_page(fbi->screen_buffer)); -#endif + drm_fb_helper_fill_info(fbi, fb_helper, sizes); - if (fb->funcs->dirty) { + if (drm_fbdev_use_shadow_fb(fb_helper)) { struct fb_ops *fbops; void *shadow; @@ -2231,6 +2247,19 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fbi->fbdefio = &drm_fbdev_defio; fb_deferred_io_init(fbi); + } else { + /* buffer is mapped for HW framebuffer */ + vaddr = drm_client_buffer_vmap(fb_helper->buffer); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + fbi->screen_buffer = vaddr; + /* Shamelessly leak the physical address to user-space */ +#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) + if (drm_leak_fbdev_smem && 
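/*
 * Editor's note (not part of the patch): drm_fbdev_use_shadow_fb() above
 * folds three reasons to blit through a shadow buffer into one predicate:
 * the device asks for a shadow specifically for fbdev, the device prefers
 * a shadow in general, or the framebuffer has a dirty() hook that must
 * see explicit updates. Truth-table sketch:
 */
#include <stdbool.h>
#include <stdio.h>

static bool use_shadow_fb(bool prefer_shadow_fbdev, bool prefer_shadow,
			  bool has_dirty_hook)
{
	return prefer_shadow_fbdev || prefer_shadow || has_dirty_hook;
}

int main(void)
{
	/* bochs after this series sets prefer_shadow_fbdev = 1 */
	printf("%d\n", use_shadow_fb(true, false, false));  /* 1: shadow path */
	printf("%d\n", use_shadow_fb(false, false, false)); /* 0: direct map */
	return 0;
}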
fbi->fix.smem_start == 0) + fbi->fix.smem_start = + page_to_phys(virt_to_page(fbi->screen_buffer)); +#endif } return 0; diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 0b72468e8131..57564318ceea 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -835,7 +835,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb) struct drm_device *dev = fb->dev; struct drm_atomic_state *state; struct drm_plane *plane; - struct drm_connector *conn; + struct drm_connector *conn __maybe_unused; struct drm_connector_state *conn_state; int i, ret; unsigned plane_mask; diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 74a5739df506..80fcd5dc1558 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1686,7 +1686,7 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len, * * Additionals options can be provided following the mode, using a comma to * separate each option. Valid options can be found in - * Documentation/fb/modedb.txt. + * Documentation/fb/modedb.rst. * * The intermediate drm_cmdline_mode structure is required to store additional * options from the command line modline like the force-enable/disable flag. diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 60ce4a8ad9e1..6f7d3b3b3628 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -2,6 +2,7 @@ config DRM_EXYNOS tristate "DRM Support for Samsung SoC EXYNOS Series" depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST) + depends on MMU select DRM_KMS_HELPER select VIDEOMODE_HELPERS select SND_SOC_HDMI_CODEC if SND_SOC diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index a594ab7be2c0..164d914cbe9a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -44,7 +44,7 @@ static unsigned int fimc_mask = 0xc; module_param_named(fimc_devs, fimc_mask, uint, 0644); MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM"); -#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev)) +#define get_fimc_context(dev) dev_get_drvdata(dev) enum { FIMC_CLK_LCLK, diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 50904eee96f7..2a3382d43bc9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -267,7 +267,7 @@ static inline void g2d_hw_reset(struct g2d_data *g2d) static int g2d_init_cmdlist(struct g2d_data *g2d) { struct device *dev = g2d->dev; - struct g2d_cmdlist_node *node = g2d->cmdlist_node; + struct g2d_cmdlist_node *node; int nr; int ret; struct g2d_buf_info *buf_info; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 1e4b21c49a06..1c524db9570f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -58,7 +58,7 @@ #define GSC_COEF_DEPTH 3 #define GSC_AUTOSUSPEND_DELAY 2000 -#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev)) +#define get_gsc_context(dev) dev_get_drvdata(dev) #define gsc_read(offset) readl(ctx->regs + (offset)) #define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset)) diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index 9af096479e1c..b24ba948b725 100644 --- 
a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -94,12 +94,12 @@ static inline int scaler_reset(struct scaler_context *scaler) scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG); do { cpu_relax(); - } while (retry > 1 && + } while (--retry > 1 && scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET); do { cpu_relax(); scaler_write(1, SCALER_INT_EN); - } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1); + } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1); return retry ? 0 : -EIO; } diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 91355c2ea8a5..8cace65f50ce 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -16,7 +16,6 @@ subdir-ccflags-y := -Wall -Wextra subdir-ccflags-y += $(call cc-disable-warning, unused-parameter) subdir-ccflags-y += $(call cc-disable-warning, type-limits) subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers) -subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough) subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) # clang warnings subdir-ccflags-y += $(call cc-disable-warning, sign-compare) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index c4710889cb32..3ef4e9f573cf 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -765,7 +765,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) } if (bdb->version >= 226) { - u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time; + u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time; wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3; switch (wakeup_time) { diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 753ac3165061..7b908e10d32e 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -178,6 +178,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv) clpchgroup = (sa->deburst * deinterleave / num_channels) << i; bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; + bi->num_qgv_points = qi.num_points; + for (j = 0; j < qi.num_points; j++) { const struct intel_qgv_point *sp = &qi.points[j]; int ct, bw; @@ -195,7 +197,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv) bi->deratedbw[j] = min(maxdebw, bw * 9 / 10); /* 90% */ - DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n", + DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n", i, j, bi->num_planes, bi->deratedbw[j]); } @@ -211,14 +213,17 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, { int i; - /* Did we initialize the bw limits successfully? */ - if (dev_priv->max_bw[0].num_planes == 0) - return UINT_MAX; - for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) { const struct intel_bw_info *bi = &dev_priv->max_bw[i]; + /* + * Pcode will not expose all QGV points when + * SAGV is forced to off/min/med/max. 
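/*
 * Editor's note (not part of the patch): the scaler_reset() fix above
 * decrements retry inside the loop condition; previously the counter was
 * never decremented, so the bound was dead and a stuck SOFT_RESET bit
 * would spin forever. Minimal demonstration of the corrected shape, with
 * a fake register that clears after two polls:
 */
#include <stdio.h>

static int busy_polls = 2;

static int reset_pending(void) { return busy_polls-- > 0; } /* fake register read */

int main(void)
{
	int retry = 5;

	do {
		/* cpu_relax() in the kernel */
	} while (--retry > 1 && reset_pending());

	printf(retry ? "reset done\n" : "timed out: -EIO\n");
	return 0;
}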
+ */ + if (qgv_point >= bi->num_qgv_points) + return UINT_MAX; + if (num_planes >= bi->num_planes) return bi->deratedbw[qgv_point]; } diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 8993ab283562..0d19bbd08122 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2240,6 +2240,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) min_cdclk = max(2 * 96000, min_cdclk); /* + * "For DP audio configuration, cdclk frequency shall be set to + * meet the following requirements: + * DP Link Frequency(MHz) | Cdclk frequency(MHz) + * 270 | 320 or higher + * 162 | 200 or higher" + */ + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio) + min_cdclk = max(crtc_state->port_clock, min_cdclk); + + /* * On Valleyview some DSI panels lose (v|h)sync when the clock is lower * than 320000KHz. */ diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 8592a7d422de..592b92782fab 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1839,7 +1839,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) /* FIXME: assert CPU port conditions for SNB+ */ } - trace_intel_pipe_enable(dev_priv, pipe); + trace_intel_pipe_enable(crtc); reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); @@ -1880,7 +1880,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) */ assert_planes_disabled(crtc); - trace_intel_pipe_disable(dev_priv, pipe); + trace_intel_pipe_disable(crtc); reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); @@ -12042,7 +12042,7 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state) case INTEL_OUTPUT_DDI: if (WARN_ON(!HAS_DDI(to_i915(dev)))) break; - /* else: fall through */ + /* else, fall through */ case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index c93ad512014c..2d1939db108f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -438,16 +438,23 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, #define ICL_AUX_PW_TO_CH(pw_idx) \ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) +#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ + ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) + static void icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx); + int pw_idx = power_well->desc->hsw.idx; + bool is_tbt = power_well->desc->hsw.is_tc_tbt; + enum aux_ch aux_ch; u32 val; + aux_ch = is_tbt ? 
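/*
 * Editor's sketch (not part of the patch): on VLV/CHV the DP-audio table
 * quoted above is satisfied by raising the cdclk floor to the link rate
 * and letting cdclk selection round up to the next supported step, so a
 * 270 MHz link lands on 320 MHz and a 162 MHz link on 200 MHz. The step
 * list below is assumed for illustration only:
 */
#include <stdio.h>

static int round_up_cdclk(int min_khz)
{
	static const int steps[] = { 144000, 200000, 266667, 320000, 400000 };

	for (unsigned int i = 0; i < sizeof(steps) / sizeof(steps[0]); i++)
		if (steps[i] >= min_khz)
			return steps[i];
	return -1;
}

int main(void)
{
	int min_cdclk = 2 * 96000; /* baseline audio requirement, kHz */
	int port_clock = 270000;   /* DP HBR link rate, kHz */

	if (port_clock > min_cdclk)
		min_cdclk = port_clock; /* the new floor from this patch */

	printf("cdclk >= %d kHz\n", round_up_cdclk(min_cdclk)); /* 320000 */
	return 0;
}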
ICL_TBT_AUX_PW_TO_CH(pw_idx) : + ICL_AUX_PW_TO_CH(pw_idx); val = I915_READ(DP_AUX_CH_CTL(aux_ch)); val &= ~DP_AUX_CH_CTL_TBT_IO; - if (power_well->desc->hsw.is_tc_tbt) + if (is_tbt) val |= DP_AUX_CH_CTL_TBT_IO; I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 4336df46fe78..d0fc34826771 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -231,6 +231,7 @@ static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp) switch (lane_info) { default: MISSING_CASE(lane_info); + /* fall through */ case 1: case 2: case 4: diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 2f4894e9a03d..5ddbe71ab423 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -478,13 +478,13 @@ struct psr_table { /* TP wake up time in multiple of 100 */ u16 tp1_wakeup_time; u16 tp2_tp3_wakeup_time; - - /* PSR2 TP2/TP3 wakeup time for 16 panels */ - u32 psr2_tp2_tp3_wakeup_time; } __packed; struct bdb_psr { struct psr_table psr_table[16]; + + /* PSR2 TP2/TP3 wakeup time for 16 panels */ + u32 psr2_tp2_tp3_wakeup_time; } __packed; /* diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 391621ee3cbb..39a661927d8e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -341,7 +341,7 @@ err: */ if (!i915_terminally_wedged(i915)) return VM_FAULT_SIGBUS; - /* else: fall through */ + /* else, fall through */ case -EAGAIN: /* * EAGAIN means the gpu is hung and we'll wait for the error diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index b36ad269f4ea..65eb430cedba 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -268,7 +268,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, switch (type) { default: MISSING_CASE(type); - /* fallthrough to use PAGE_KERNEL anyway */ + /* fallthrough - to use PAGE_KERNEL anyway */ case I915_MAP_WB: pgprot = PAGE_KERNEL; break; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 05011d4a3b88..914b5d4112bb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -253,14 +253,15 @@ void i915_gem_resume(struct drm_i915_private *i915) i915_gem_restore_gtt_mappings(i915); i915_gem_restore_fences(i915); + if (i915_gem_init_hw(i915)) + goto err_wedged; + /* * As we didn't flush the kernel context before suspend, we cannot * guarantee that the context image is complete. So let's just reset * it and start again. */ - intel_gt_resume(i915); - - if (i915_gem_init_hw(i915)) + if (intel_gt_resume(i915)) goto err_wedged; intel_uc_resume(i915); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 528b61678334..2caa594322bc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -664,7 +664,15 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, for_each_sgt_page(page, sgt_iter, pages) { if (obj->mm.dirty) - set_page_dirty(page); + /* + * As this may not be anonymous memory (e.g. 
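/*
 * Editor's note (not part of the patch): the VBT fix above moves
 * psr2_tp2_tp3_wakeup_time from struct psr_table to the end of struct
 * bdb_psr, because it is one shared dword carrying 2-bit fields for all
 * 16 panels, not a per-panel member; leaving it inside the array element
 * shifted every following entry. Simplified layout check (the real
 * psr_table has more members):
 */
#include <stdint.h>
#include <stdio.h>

struct psr_table {
	uint16_t tp1_wakeup_time;
	uint16_t tp2_tp3_wakeup_time;
} __attribute__((packed));

struct bdb_psr {
	struct psr_table psr_table[16];
	uint32_t psr2_tp2_tp3_wakeup_time; /* 2 bits per panel, shared dword */
} __attribute__((packed));

int main(void)
{
	int panel_type = 3;
	uint32_t raw = 0xaaaaaaaa; /* example VBT dword */

	printf("sizeof(bdb_psr) = %zu\n", sizeof(struct bdb_psr)); /* 68 */
	printf("panel %d wakeup code = %u\n", panel_type,
	       (raw >> (2 * panel_type)) & 0x3);
	return 0;
}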
shmem) + * but exist on a real mapping, we have to lock + * the page in order to dirty it -- holding + * the page reference is not sufficient to + * prevent the inode from being truncated. + * Play safe and take the lock. + */ + set_page_dirty_lock(page); mark_page_accessed(page); put_page(page); diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 2c454f227c2e..23120901c55f 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -126,6 +126,7 @@ static void intel_context_retire(struct i915_active *active) if (ce->state) __context_unpin_state(ce->state); + intel_ring_unpin(ce->ring); intel_context_put(ce); } @@ -160,27 +161,35 @@ int intel_context_active_acquire(struct intel_context *ce, unsigned long flags) intel_context_get(ce); + err = intel_ring_pin(ce->ring); + if (err) + goto err_put; + if (!ce->state) return 0; err = __context_pin_state(ce->state, flags); - if (err) { - i915_active_cancel(&ce->active); - intel_context_put(ce); - return err; - } + if (err) + goto err_ring; /* Preallocate tracking nodes */ if (!i915_gem_context_is_kernel(ce->gem_context)) { err = i915_active_acquire_preallocate_barrier(&ce->active, ce->engine); - if (err) { - i915_active_release(&ce->active); - return err; - } + if (err) + goto err_state; } return 0; + +err_state: + __context_unpin_state(ce->state); +err_ring: + intel_ring_unpin(ce->ring); +err_put: + intel_context_put(ce); + i915_active_cancel(&ce->active); + return err; } void intel_context_active_release(struct intel_context *ce) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 7fd33e81c2d9..f25632c9b292 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -969,9 +969,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) { const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + unsigned int slice = fls(sseu->slice_mask) - 1; + unsigned int subslice; u32 mcr_s_ss_select; - u32 slice = fls(sseu->slice_mask); - u32 subslice = fls(sseu->subslice_mask[slice]); + + GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask)); + subslice = fls(sseu->subslice_mask[slice]); + GEM_BUG_ON(!subslice); + subslice--; if (IS_GEN(dev_priv, 10)) mcr_s_ss_select = GEN8_MCR_SLICE(slice) | @@ -1471,6 +1476,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct i915_gpu_error * const error = &engine->i915->gpu_error; struct i915_request *rq; intel_wakeref_t wakeref; + unsigned long flags; if (header) { va_list ap; @@ -1490,10 +1496,9 @@ void intel_engine_dump(struct intel_engine_cs *engine, i915_reset_engine_count(error, engine), i915_reset_count(error)); - rcu_read_lock(); - drm_printf(m, "\tRequests:\n"); + spin_lock_irqsave(&engine->active.lock, flags); rq = intel_engine_find_active_request(engine); if (rq) { print_request(m, rq, "\t\tactive "); @@ -1513,8 +1518,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, print_request_ring(m, rq); } - - rcu_read_unlock(); + spin_unlock_irqrestore(&engine->active.lock, flags); wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm); if (wakeref) { @@ -1672,7 +1676,6 @@ struct i915_request * intel_engine_find_active_request(struct intel_engine_cs *engine) { struct i915_request *request, *active = NULL; - unsigned long flags; /* * We are called by the error capture, reset and to dump engine @@ 
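/*
 * Editor's sketch (generic idiom, not part of the patch): the reworked
 * intel_context_active_acquire() above uses the usual kernel unwind
 * style -- take resources in order, release in reverse through cascading
 * labels so each failure point undoes exactly what succeeded. Shape of
 * the idiom in standalone form:
 */
#include <stdio.h>

static int take(const char *what) { printf("take %s\n", what); return 0; }
static void drop(const char *what) { printf("drop %s\n", what); }

static int acquire_all(int fail_at)
{
	int err;

	err = fail_at == 1 ? -1 : take("ref");
	if (err)
		return err;
	err = fail_at == 2 ? -1 : take("ring");
	if (err)
		goto err_ref;
	err = fail_at == 3 ? -1 : take("state");
	if (err)
		goto err_ring;
	return 0;

err_ring:
	drop("ring");
err_ref:
	drop("ref");
	return err;
}

int main(void)
{
	return acquire_all(3) ? 1 : 0; /* fails at "state": ring and ref unwind */
}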
-1685,7 +1688,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine) * At all other times, we must assume the GPU is still running, but * we only care about the snapshot of this moment. */ - spin_lock_irqsave(&engine->active.lock, flags); + lockdep_assert_held(&engine->active.lock); list_for_each_entry(request, &engine->active.requests, sched.link) { if (i915_request_completed(request)) continue; @@ -1700,7 +1703,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine) active = request; break; } - spin_unlock_irqrestore(&engine->active.lock, flags); return active; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 2ce00d3dc42a..ae5b6baf6dff 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -142,27 +142,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine) { intel_wakeref_init(&engine->wakeref); } - -int intel_engines_resume(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err = 0; - - intel_gt_pm_get(i915); - for_each_engine(engine, i915, id) { - intel_engine_pm_get(engine); - engine->serial++; /* kernel context lost */ - err = engine->resume(engine); - intel_engine_pm_put(engine); - if (err) { - dev_err(i915->drm.dev, - "Failed to restart %s (%d)\n", - engine->name, err); - break; - } - } - intel_gt_pm_put(i915); - - return err; -} diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h index b326cd993d60..a11c893f64c6 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h @@ -7,16 +7,22 @@ #ifndef INTEL_ENGINE_PM_H #define INTEL_ENGINE_PM_H +#include "intel_engine_types.h" +#include "intel_wakeref.h" + struct drm_i915_private; -struct intel_engine_cs; void intel_engine_pm_get(struct intel_engine_cs *engine); void intel_engine_pm_put(struct intel_engine_cs *engine); +static inline bool +intel_engine_pm_get_if_awake(struct intel_engine_cs *engine) +{ + return intel_wakeref_get_if_active(&engine->wakeref); +} + void intel_engine_park(struct intel_engine_cs *engine); void intel_engine_init__pm(struct intel_engine_cs *engine); -int intel_engines_resume(struct drm_i915_private *i915); - #endif /* INTEL_ENGINE_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 868b220214f8..43e975a26016 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -70,6 +70,18 @@ struct intel_ring { struct list_head request_list; struct list_head active_link; + /* + * As we have two types of rings, one global to the engine used + * by ringbuffer submission and those that are exclusive to a + * context used by execlists, we have to play safe and allow + * atomic updates to the pin_count. However, the actual pinning + * of the context is either done during initialisation for + * ringbuffer submission or serialised as part of the context + * pinning for execlists, and so we do not need a mutex ourselves + * to serialise intel_ring_pin/intel_ring_unpin. 
+ */ + atomic_t pin_count; + u32 head; u32 tail; u32 emit; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 7b5967751762..9f8f7f54191f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -5,6 +5,7 @@ */ #include "i915_drv.h" +#include "intel_engine_pm.h" #include "intel_gt_pm.h" #include "intel_pm.h" #include "intel_wakeref.h" @@ -118,10 +119,11 @@ void intel_gt_sanitize(struct drm_i915_private *i915, bool force) intel_engine_reset(engine, false); } -void intel_gt_resume(struct drm_i915_private *i915) +int intel_gt_resume(struct drm_i915_private *i915) { struct intel_engine_cs *engine; enum intel_engine_id id; + int err = 0; /* * After resume, we may need to poke into the pinned kernel @@ -129,9 +131,12 @@ void intel_gt_resume(struct drm_i915_private *i915) * Only the kernel contexts should remain pinned over suspend, * allowing us to fixup the user contexts on their first pin. */ + intel_gt_pm_get(i915); for_each_engine(engine, i915, id) { struct intel_context *ce; + intel_engine_pm_get(engine); + ce = engine->kernel_context; if (ce) ce->ops->reset(ce); @@ -139,5 +144,19 @@ void intel_gt_resume(struct drm_i915_private *i915) ce = engine->preempt_context; if (ce) ce->ops->reset(ce); + + engine->serial++; /* kernel context lost */ + err = engine->resume(engine); + + intel_engine_pm_put(engine); + if (err) { + dev_err(i915->drm.dev, + "Failed to restart %s (%d)\n", + engine->name, err); + break; + } } + intel_gt_pm_put(i915); + + return err; } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index 7dd1130a19a4..53f342b20181 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -22,6 +22,6 @@ void intel_gt_pm_put(struct drm_i915_private *i915); void intel_gt_pm_init(struct drm_i915_private *i915); void intel_gt_sanitize(struct drm_i915_private *i915, bool force); -void intel_gt_resume(struct drm_i915_private *i915); +int intel_gt_resume(struct drm_i915_private *i915); #endif /* INTEL_GT_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index b42b5f158295..82b7ace62d97 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1414,6 +1414,7 @@ static void execlists_context_destroy(struct kref *kref) { struct intel_context *ce = container_of(kref, typeof(*ce), ref); + GEM_BUG_ON(!i915_active_is_idle(&ce->active)); GEM_BUG_ON(intel_context_is_pinned(ce)); if (ce->state) @@ -1426,7 +1427,6 @@ static void execlists_context_unpin(struct intel_context *ce) { i915_gem_context_unpin_hw_id(ce->gem_context); i915_gem_object_unpin_map(ce->state->obj); - intel_ring_unpin(ce->ring); } static void @@ -1478,13 +1478,9 @@ __execlists_context_pin(struct intel_context *ce, goto unpin_active; } - ret = intel_ring_pin(ce->ring); - if (ret) - goto unpin_map; - ret = i915_gem_context_pin_hw_id(ce->gem_context); if (ret) - goto unpin_ring; + goto unpin_map; ce->lrc_desc = lrc_descriptor(ce, engine); ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; @@ -1492,8 +1488,6 @@ __execlists_context_pin(struct intel_context *ce, return 0; -unpin_ring: - intel_ring_unpin(ce->ring); unpin_map: i915_gem_object_unpin_map(ce->state->obj); unpin_active: diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 4c478b38e420..3f907701ef4d 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ 
-687,7 +687,6 @@ static void reset_prepare_engine(struct intel_engine_cs *engine) * written to the powercontext is undefined and so we may lose * GPU state upon resume, i.e. fail to restart after a reset. */ - intel_engine_pm_get(engine); intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); engine->reset.prepare(engine); } @@ -718,16 +717,21 @@ static void revoke_mmaps(struct drm_i915_private *i915) } } -static void reset_prepare(struct drm_i915_private *i915) +static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915) { struct intel_engine_cs *engine; + intel_engine_mask_t awake = 0; enum intel_engine_id id; - intel_gt_pm_get(i915); - for_each_engine(engine, i915, id) + for_each_engine(engine, i915, id) { + if (intel_engine_pm_get_if_awake(engine)) + awake |= engine->mask; reset_prepare_engine(engine); + } intel_uc_reset_prepare(i915); + + return awake; } static void gt_revoke(struct drm_i915_private *i915) @@ -761,20 +765,22 @@ static int gt_reset(struct drm_i915_private *i915, static void reset_finish_engine(struct intel_engine_cs *engine) { engine->reset.finish(engine); - intel_engine_pm_put(engine); intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); + + intel_engine_signal_breadcrumbs(engine); } -static void reset_finish(struct drm_i915_private *i915) +static void reset_finish(struct drm_i915_private *i915, + intel_engine_mask_t awake) { struct intel_engine_cs *engine; enum intel_engine_id id; for_each_engine(engine, i915, id) { reset_finish_engine(engine); - intel_engine_signal_breadcrumbs(engine); + if (awake & engine->mask) + intel_engine_pm_put(engine); } - intel_gt_pm_put(i915); } static void nop_submit_request(struct i915_request *request) @@ -798,6 +804,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915) { struct i915_gpu_error *error = &i915->gpu_error; struct intel_engine_cs *engine; + intel_engine_mask_t awake; enum intel_engine_id id; if (test_bit(I915_WEDGED, &error->flags)) @@ -817,7 +824,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915) * rolling the global seqno forward (since this would complete requests * for which we haven't set the fence error to EIO yet). 
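/*
 * Editor's sketch (not part of the patch): reset_prepare() now returns a
 * bitmask of the engines that were awake -- those whose PM reference
 * intel_engine_pm_get_if_awake() actually took -- so reset_finish() can
 * drop exactly those references and no others. Standalone shape:
 */
#include <stdio.h>

#define NUM_ENGINES 4

static const int engine_awake[NUM_ENGINES] = { 1, 0, 1, 1 }; /* pretend PM state */

static unsigned int prepare(void)
{
	unsigned int awake = 0;

	for (int id = 0; id < NUM_ENGINES; id++)
		if (engine_awake[id]) /* intel_engine_pm_get_if_awake() */
			awake |= 1u << id;
	return awake;
}

static void finish(unsigned int awake)
{
	for (int id = 0; id < NUM_ENGINES; id++)
		if (awake & (1u << id))
			printf("pm_put engine %d\n", id); /* balanced put */
}

int main(void)
{
	finish(prepare()); /* releases engines 0, 2 and 3 only */
	return 0;
}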
*/ - reset_prepare(i915); + awake = reset_prepare(i915); /* Even if the GPU reset fails, it should still stop the engines */ if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) @@ -841,7 +848,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915) for_each_engine(engine, i915, id) engine->cancel_requests(engine); - reset_finish(i915); + reset_finish(i915, awake); GEM_TRACE("end\n"); } @@ -951,6 +958,21 @@ static int do_reset(struct drm_i915_private *i915, return gt_reset(i915, stalled_mask); } +static int resume(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int ret; + + for_each_engine(engine, i915, id) { + ret = engine->resume(engine); + if (ret) + return ret; + } + + return 0; +} + /** * i915_reset - reset chip after a hang * @i915: #drm_i915_private to reset @@ -973,6 +995,7 @@ void i915_reset(struct drm_i915_private *i915, const char *reason) { struct i915_gpu_error *error = &i915->gpu_error; + intel_engine_mask_t awake; int ret; GEM_TRACE("flags=%lx\n", error->flags); @@ -989,7 +1012,7 @@ void i915_reset(struct drm_i915_private *i915, dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); error->reset_count++; - reset_prepare(i915); + awake = reset_prepare(i915); if (!intel_has_gpu_reset(i915)) { if (i915_modparams.reset) @@ -1024,13 +1047,17 @@ void i915_reset(struct drm_i915_private *i915, if (ret) { DRM_ERROR("Failed to initialise HW following reset (%d)\n", ret); - goto error; + goto taint; } + ret = resume(i915); + if (ret) + goto taint; + i915_queue_hangcheck(i915); finish: - reset_finish(i915); + reset_finish(i915, awake); unlock: mutex_unlock(&error->wedge_mutex); return; @@ -1081,7 +1108,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) GEM_TRACE("%s flags=%lx\n", engine->name, error->flags); GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); - if (!intel_wakeref_active(&engine->wakeref)) + if (!intel_engine_pm_get_if_awake(engine)) return 0; reset_prepare_engine(engine); @@ -1116,12 +1143,11 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) * process to program RING_MODE, HWSP and re-enable submission. 
*/ ret = engine->resume(engine); - if (ret) - goto out; out: intel_engine_cancel_stop_cs(engine); reset_finish_engine(engine); + intel_engine_pm_put(engine); return ret; } diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index c6023bc9452d..12010e798868 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1149,16 +1149,16 @@ i915_emit_bb_start(struct i915_request *rq, int intel_ring_pin(struct intel_ring *ring) { struct i915_vma *vma = ring->vma; - enum i915_map_type map = i915_coherent_map_type(vma->vm->i915); unsigned int flags; void *addr; int ret; - GEM_BUG_ON(ring->vaddr); + if (atomic_fetch_inc(&ring->pin_count)) + return 0; ret = i915_timeline_pin(ring->timeline); if (ret) - return ret; + goto err_unpin; flags = PIN_GLOBAL; @@ -1172,26 +1172,31 @@ int intel_ring_pin(struct intel_ring *ring) ret = i915_vma_pin(vma, 0, 0, flags); if (unlikely(ret)) - goto unpin_timeline; + goto err_timeline; if (i915_vma_is_map_and_fenceable(vma)) addr = (void __force *)i915_vma_pin_iomap(vma); else - addr = i915_gem_object_pin_map(vma->obj, map); + addr = i915_gem_object_pin_map(vma->obj, + i915_coherent_map_type(vma->vm->i915)); if (IS_ERR(addr)) { ret = PTR_ERR(addr); - goto unpin_ring; + goto err_ring; } vma->obj->pin_global++; + GEM_BUG_ON(ring->vaddr); ring->vaddr = addr; + return 0; -unpin_ring: +err_ring: i915_vma_unpin(vma); -unpin_timeline: +err_timeline: i915_timeline_unpin(ring->timeline); +err_unpin: + atomic_dec(&ring->pin_count); return ret; } @@ -1207,16 +1212,19 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail) void intel_ring_unpin(struct intel_ring *ring) { - GEM_BUG_ON(!ring->vma); - GEM_BUG_ON(!ring->vaddr); + if (!atomic_dec_and_test(&ring->pin_count)) + return; /* Discard any unused bytes beyond that submitted to hw. 
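/*
 * Editor's sketch (not part of the patch): intel_ring_pin() and
 * intel_ring_unpin() above now nest through an atomic pin count -- only
 * the 0->1 transition maps the ring and only the 1->0 transition tears
 * it down, which lets the ringbuffer and execlists paths pin without a
 * mutex. Standalone model using C11 atomics:
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pin_count;

static void ring_pin(void)
{
	if (atomic_fetch_add(&pin_count, 1)) /* like atomic_fetch_inc() */
		return;                      /* already pinned elsewhere */
	printf("map ring\n");                /* first pin does the work */
}

static void ring_unpin(void)
{
	if (atomic_fetch_sub(&pin_count, 1) != 1) /* like !atomic_dec_and_test() */
		return;                           /* still pinned elsewhere */
	printf("unmap ring\n");                   /* last unpin tears down */
}

int main(void)
{
	ring_pin();   /* map ring */
	ring_pin();   /* nested: no-op */
	ring_unpin(); /* no-op */
	ring_unpin(); /* unmap ring */
	return 0;
}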
*/ intel_ring_reset(ring, ring->tail); + GEM_BUG_ON(!ring->vma); if (i915_vma_is_map_and_fenceable(ring->vma)) i915_vma_unpin_iomap(ring->vma); else i915_gem_object_unpin_map(ring->vma->obj); + + GEM_BUG_ON(!ring->vaddr); ring->vaddr = NULL; ring->vma->obj->pin_global--; @@ -2081,10 +2089,11 @@ static void ring_destroy(struct intel_engine_cs *engine) WARN_ON(INTEL_GEN(dev_priv) > 2 && (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); + intel_engine_cleanup_common(engine); + intel_ring_unpin(engine->buffer); intel_ring_put(engine->buffer); - intel_engine_cleanup_common(engine); kfree(engine); } diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 15e90fd2cfdc..98dfb086320f 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1098,10 +1098,25 @@ static void glk_whitelist_build(struct intel_engine_cs *engine) static void cfl_whitelist_build(struct intel_engine_cs *engine) { + struct i915_wa_list *w = &engine->whitelist; + if (engine->class != RENDER_CLASS) return; - gen9_whitelist_build(&engine->whitelist); + gen9_whitelist_build(w); + + /* + * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml + * + * This covers 4 register which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + RING_FORCE_TO_NONPRIV_RD | + RING_FORCE_TO_NONPRIV_RANGE_4); } static void cnl_whitelist_build(struct intel_engine_cs *engine) @@ -1129,6 +1144,19 @@ static void icl_whitelist_build(struct intel_engine_cs *engine) /* WaEnableStateCacheRedirectToCS:icl */ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1); + + /* + * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl + * + * This covers 4 register which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + RING_FORCE_TO_NONPRIV_RD | + RING_FORCE_TO_NONPRIV_RANGE_4); break; case VIDEO_DECODE_CLASS: @@ -1258,8 +1286,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) wa_write_or(wal, GEN7_SARCHKMD, - GEN7_DISABLE_DEMAND_PREFETCH | - GEN7_DISABLE_SAMPLER_PREFETCH); + GEN7_DISABLE_DEMAND_PREFETCH); + + /* Wa_1606682166:icl */ + wa_write_or(wal, + GEN7_SARCHKMD, + GEN7_DISABLE_SAMPLER_PREFETCH); } if (IS_GEN_RANGE(i915, 9, 11)) { diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 086801b51441..486c6953dcb1 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -66,6 +66,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) ring->base.effective_size = sz; ring->base.vaddr = (void *)(ring + 1); ring->base.timeline = &ring->timeline; + atomic_set(&ring->base.pin_count, 1); INIT_LIST_HEAD(&ring->base.request_list); intel_ring_update_space(&ring->base); diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 89da9e7cc1ba..b5c590c9ccba 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -71,13 +71,16 @@ static int igt_atomic_reset(void *arg) goto unlock; for (p = igt_atomic_phases; p->name; p++) { + intel_engine_mask_t awake; + GEM_TRACE("intel_gpu_reset under %s\n", p->name); + awake = reset_prepare(i915); 
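/*
 * Editor's sketch (not part of the patch): the whitelist entries added
 * above pack access flags into the register value --
 * RING_FORCE_TO_NONPRIV_RD marks the entry read-only for userspace and
 * RING_FORCE_TO_NONPRIV_RANGE_4 extends it over four consecutive
 * registers (PS_INVOCATION_COUNT through PS_DEPTH_COUNT_UDW). The bit
 * positions and offset below are made up for illustration:
 */
#include <stdint.h>
#include <stdio.h>

#define NONPRIV_RD      (1u << 0) /* assumed encoding, illustrative */
#define NONPRIV_RANGE_4 (1u << 1) /* assumed encoding, illustrative */

static void whitelist_reg_ext(uint32_t reg, uint32_t flags)
{
	uint32_t count = (flags & NONPRIV_RANGE_4) ? 4 : 1;

	for (uint32_t i = 0; i < count; i++)
		printf("0x%05x %s\n", reg + 4 * i,
		       (flags & NONPRIV_RD) ? "read-only" : "read-write");
}

int main(void)
{
	whitelist_reg_ext(0x02348 /* illustrative offset */,
			  NONPRIV_RD | NONPRIV_RANGE_4);
	return 0;
}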
p->critical_section_begin(); reset_prepare(i915); err = intel_gpu_reset(i915, ALL_ENGINES); - reset_finish(i915); p->critical_section_end(); + reset_finish(i915, awake); if (err) { pr_err("intel_gpu_reset failed under %s\n", p->name); diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 9eaf030affd0..44becd9538be 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -925,7 +925,12 @@ check_whitelisted_registers(struct intel_engine_cs *engine, err = 0; for (i = 0; i < engine->whitelist.count; i++) { - if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg)) + const struct i915_wa *wa = &engine->whitelist.list[i]; + + if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD) + continue; + + if (!fn(engine, a[i], b[i], wa->reg)) err = -EINVAL; } diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 6ea88270c818..b09dc315e2da 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -2674,11 +2674,6 @@ static int scan_workload(struct intel_vgpu_workload *workload) gma_head == gma_tail) return 0; - if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { - ret = -EINVAL; - goto out; - } - ret = ip_gma_set(&s, gma_head); if (ret) goto out; @@ -2724,11 +2719,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) s.workload = workload; s.is_ctx_wa = true; - if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { - ret = -EINVAL; - goto out; - } - ret = ip_gma_set(&s, gma_head); if (ret) goto out; diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 65e847392aea..8bb292b01271 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -245,7 +245,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->hw_format = fmt; plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) + if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); @@ -368,7 +368,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, alpha_plane, alpha_force); plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) + if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); @@ -472,7 +472,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, plane->drm_format = drm_format; plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) + if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 53115bdae12b..4b04af569c05 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2141,11 +2141,20 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; unsigned long index = off >> info->gtt_entry_size_shift; + unsigned long gma; struct intel_gvt_gtt_entry e; if (bytes != 4 && bytes != 8) return -EINVAL; + 
gma = index << I915_GTT_PAGE_SHIFT; + if (!intel_gvt_ggtt_validate_range(vgpu, + gma, 1 << I915_GTT_PAGE_SHIFT)) { + gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma); + memset(p_data, 0, bytes); + return 0; + } + ggtt_get_guest_entry(ggtt_mm, &e, index); memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)), bytes); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 144301b778df..23aa3e50cbf8 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1911,6 +1911,18 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); if (ret) goto err_unmap; + } else if (entry->size != size) { + /* the same gfn with different size: unmap and re-map */ + gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size); + __gvt_cache_remove_entry(vgpu, entry); + + ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); + if (ret) + goto err_unlock; + + ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); + if (ret) + goto err_unmap; } else { kref_get(&entry->ref); *dma_addr = entry->dma_addr; diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 2144fb46d0e1..9f3fd7d96a69 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -364,16 +364,13 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) wa_ctx->indirect_ctx.shadow_va = NULL; } -static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, - struct i915_gem_context *ctx) +static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, + struct i915_gem_context *ctx) { struct intel_vgpu_mm *mm = workload->shadow_mm; struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm); int i = 0; - if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed) - return -EINVAL; - if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0]; } else { @@ -384,8 +381,6 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; } } - - return 0; } static int @@ -614,6 +609,8 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) static int prepare_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; + struct intel_vgpu_submission *s = &vgpu->submission; + int ring = workload->ring_id; int ret = 0; ret = intel_vgpu_pin_mm(workload->shadow_mm); @@ -622,8 +619,16 @@ static int prepare_workload(struct intel_vgpu_workload *workload) return ret; } + if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT || + !workload->shadow_mm->ppgtt_mm.shadowed) { + gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); + return -EINVAL; + } + update_shadow_pdps(workload); + set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context); + ret = intel_vgpu_sync_oos_pages(workload->vgpu); if (ret) { gvt_vgpu_err("fail to vgpu sync oos pages\n"); @@ -674,7 +679,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - struct intel_vgpu_submission *s = &vgpu->submission; struct i915_request *rq; int ring_id = workload->ring_id; int ret; @@ -685,13 +689,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) mutex_lock(&vgpu->vgpu_lock); mutex_lock(&dev_priv->drm.struct_mutex); - ret = 
set_context_ppgtt_from_shadow(workload, - s->shadow[ring_id]->gem_context); - if (ret < 0) { - gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); - goto err_req; - } - ret = intel_gvt_workload_req_alloc(workload); if (ret) goto err_req; @@ -990,6 +987,7 @@ static int workload_thread(void *priv) int ret; bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9); DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm; kfree(p); @@ -1013,6 +1011,8 @@ static int workload_thread(void *priv) workload->ring_id, workload, workload->vgpu->id); + intel_runtime_pm_get(rpm); + gvt_dbg_sched("ring id %d will dispatch workload %p\n", workload->ring_id, workload); @@ -1042,6 +1042,7 @@ complete: intel_uncore_forcewake_put(&gvt->dev_priv->uncore, FORCEWAKE_ALL); + intel_runtime_pm_put_unchecked(rpm); if (ret && (vgpu_is_vm_unhealthy(ret))) enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); } @@ -1492,6 +1493,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4); + if (!intel_gvt_ggtt_validate_range(vgpu, start, + _RING_CTL_BUF_SIZE(ctl))) { + gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start); + return ERR_PTR(-EINVAL); + } + workload = alloc_workload(vgpu); if (IS_ERR(workload)) return workload; @@ -1516,9 +1523,31 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, workload->wa_ctx.indirect_ctx.size = (indirect_ctx & INDIRECT_CTX_SIZE_MASK) * CACHELINE_BYTES; + + if (workload->wa_ctx.indirect_ctx.size != 0) { + if (!intel_gvt_ggtt_validate_range(vgpu, + workload->wa_ctx.indirect_ctx.guest_gma, + workload->wa_ctx.indirect_ctx.size)) { + kmem_cache_free(s->workloads, workload); + gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n", + workload->wa_ctx.indirect_ctx.guest_gma); + return ERR_PTR(-EINVAL); + } + } + workload->wa_ctx.per_ctx.guest_gma = per_ctx & PER_CTX_ADDR_MASK; workload->wa_ctx.per_ctx.valid = per_ctx & 1; + if (workload->wa_ctx.per_ctx.valid) { + if (!intel_gvt_ggtt_validate_range(vgpu, + workload->wa_ctx.per_ctx.guest_gma, + CACHELINE_BYTES)) { + kmem_cache_free(s->workloads, workload); + gvt_vgpu_err("invalid per_ctx at: 0x%lx\n", + workload->wa_ctx.per_ctx.guest_gma); + return ERR_PTR(-EINVAL); + } + } } gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", diff --git a/drivers/gpu/drm/i915/gvt/trace_points.c b/drivers/gpu/drm/i915/gvt/trace_points.c index a3deed692b9c..fe552e877e09 100644 --- a/drivers/gpu/drm/i915/gvt/trace_points.c +++ b/drivers/gpu/drm/i915/gvt/trace_points.c @@ -28,8 +28,6 @@ * */ -#include "trace.h" - #ifndef __CHECKER__ #define CREATE_TRACE_POINTS #include "trace.h" diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bc909ec5d9c3..fe7a6ec2c199 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1674,8 +1674,9 @@ struct drm_i915_private { } dram_info; struct intel_bw_info { - int num_planes; - int deratedbw[3]; + unsigned int deratedbw[3]; /* for each QGV point */ + u8 num_qgv_points; + u8 num_planes; } max_bw[6]; struct drm_private_obj bw_obj; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 190ad54fb072..8a659d3d7435 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -46,7 +46,6 @@ #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_pm.h" #include "gem/i915_gemfs.h" -#include "gt/intel_engine_pm.h" #include "gt/intel_gt_pm.h" 
#include "gt/intel_mocs.h" #include "gt/intel_reset.h" @@ -1307,21 +1306,13 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) intel_mocs_init_l3cc_table(dev_priv); - /* Only when the HW is re-initialised, can we replay the requests */ - ret = intel_engines_resume(dev_priv); - if (ret) - goto cleanup_uc; - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_engines_set_scheduler_caps(dev_priv); return 0; -cleanup_uc: - intel_uc_fini_hw(dev_priv); out: intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - return ret; } @@ -1580,6 +1571,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (ret) goto err_uc_init; + /* Only when the HW is re-initialised, can we replay the requests */ + ret = intel_gt_resume(dev_priv); + if (ret) + goto err_init_hw; + /* * Despite its name intel_init_clock_gating applies both display * clock gating workarounds; GT mmio workarounds and the occasional @@ -1593,20 +1589,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv) ret = intel_engines_verify_workarounds(dev_priv); if (ret) - goto err_init_hw; + goto err_gt; ret = __intel_engines_record_defaults(dev_priv); if (ret) - goto err_init_hw; + goto err_gt; if (i915_inject_load_failure()) { ret = -ENODEV; - goto err_init_hw; + goto err_gt; } if (i915_inject_load_failure()) { ret = -EIO; - goto err_init_hw; + goto err_gt; } intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); @@ -1620,7 +1616,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) * HW as irrevisibly wedged, but keep enough state around that the * driver doesn't explode during runtime. */ -err_init_hw: +err_gt: mutex_unlock(&dev_priv->drm.struct_mutex); i915_gem_set_wedged(dev_priv); @@ -1630,6 +1626,7 @@ err_init_hw: i915_gem_drain_workqueue(dev_priv); mutex_lock(&dev_priv->drm.struct_mutex); +err_init_hw: intel_uc_fini_hw(dev_priv); err_uc_init: intel_uc_fini(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 8ab820145ea6..7015a97b1097 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1444,9 +1444,11 @@ unwind_pd: spin_lock(&pdp->lock); if (atomic_dec_and_test(&pd->used)) { gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); + pdp->entry[pdpe] = vm->scratch_pd; GEM_BUG_ON(!atomic_read(&pdp->used)); atomic_dec(&pdp->used); - free_pd(vm, pd); + GEM_BUG_ON(alloc); + alloc = pd; /* defer the free to after the lock */ } spin_unlock(&pdp->lock); unwind: @@ -1515,7 +1517,9 @@ unwind_pdp: spin_lock(&pml4->lock); if (atomic_dec_and_test(&pdp->used)) { gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); - free_pd(vm, pdp); + pml4->entry[pml4e] = vm->scratch_pdp; + GEM_BUG_ON(alloc); + alloc = pdp; /* defer the free until after the lock */ } spin_unlock(&pml4->lock); unwind: diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index b7e9fddef270..8bc76fcff70d 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1194,6 +1194,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error, switch (engine->id) { default: MISSING_CASE(engine->id); + /* fall through */ case RCS0: mmio = RENDER_HWS_PGA_GEN7; break; @@ -1417,6 +1418,7 @@ static void gem_record_rings(struct i915_gpu_state *error) struct intel_engine_cs *engine = i915->engine[i]; struct drm_i915_error_engine *ee = &error->engine[i]; struct i915_request *request; + unsigned long flags; ee->engine_id = -1; @@ -1428,10 +1430,11 @@ static void 
gem_record_rings(struct i915_gpu_state *error) error_record_engine_registers(error, engine, ee); error_record_engine_execlists(engine, ee); + spin_lock_irqsave(&engine->active.lock, flags); request = intel_engine_find_active_request(engine); if (request) { struct i915_gem_context *ctx = request->gem_context; - struct intel_ring *ring; + struct intel_ring *ring = request->ring; ee->vm = ctx->vm ?: &ggtt->vm; @@ -1461,7 +1464,6 @@ static void gem_record_rings(struct i915_gpu_state *error) ee->rq_post = request->postfix; ee->rq_tail = request->tail; - ring = request->ring; ee->cpu_ring_head = ring->head; ee->cpu_ring_tail = ring->tail; ee->ringbuffer = @@ -1469,6 +1471,7 @@ static void gem_record_rings(struct i915_gpu_state *error) engine_record_requests(engine, request, ee); } + spin_unlock_irqrestore(&engine->active.lock, flags); ee->hws_page = i915_error_object_create(i915, diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index a700c5c3d167..5140017f9a39 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1567,28 +1567,10 @@ static void config_oa_regs(struct drm_i915_private *dev_priv, } } -static int hsw_enable_metric_set(struct i915_perf_stream *stream) +static void delay_after_mux(void) { - struct drm_i915_private *dev_priv = stream->dev_priv; - const struct i915_oa_config *oa_config = stream->oa_config; - - /* PRM: - * - * OA unit is using “crclk” for its functionality. When trunk - * level clock gating takes place, OA clock would be gated, - * unable to count the events from non-render clock domain. - * Render clock gating must be disabled when OA is enabled to - * count the events from non-render domain. Unit level clock - * gating for RCS should also be disabled. - */ - I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & - ~GEN7_DOP_CLOCK_GATE_ENABLE)); - I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | - GEN6_CSUNIT_CLOCK_GATE_DISABLE)); - - config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); - - /* It apparently takes a fairly long time for a new MUX + /* + * It apparently takes a fairly long time for a new MUX * configuration to be applied after these register writes. * This delay duration was derived empirically based on the * render_basic config but hopefully it covers the maximum @@ -1610,6 +1592,30 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream) * a delay at this location would mitigate any invalid reports. */ usleep_range(15000, 20000); +} + +static int hsw_enable_metric_set(struct i915_perf_stream *stream) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + const struct i915_oa_config *oa_config = stream->oa_config; + + /* + * PRM: + * + * OA unit is using “crclk” for its functionality. When trunk + * level clock gating takes place, OA clock would be gated, + * unable to count the events from non-render clock domain. + * Render clock gating must be disabled when OA is enabled to + * count the events from non-render domain. Unit level clock + * gating for RCS should also be disabled.
+ */ + I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & + ~GEN7_DOP_CLOCK_GATE_ENABLE)); + I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | + GEN6_CSUNIT_CLOCK_GATE_DISABLE)); + + config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); + delay_after_mux(); config_oa_regs(dev_priv, oa_config->b_counter_regs, oa_config->b_counter_regs_len); @@ -1835,6 +1841,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream) return ret; config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); + delay_after_mux(); config_oa_regs(dev_priv, oa_config->b_counter_regs, oa_config->b_counter_regs_len); @@ -2515,6 +2522,9 @@ static int i915_perf_release(struct inode *inode, struct file *file) i915_perf_destroy_locked(stream); mutex_unlock(&dev_priv->perf.lock); + /* Release the reference the perf stream kept on the driver. */ + drm_dev_put(&dev_priv->drm); + return 0; } @@ -2650,6 +2660,11 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, if (!(param->flags & I915_PERF_FLAG_DISABLED)) i915_perf_enable_locked(stream); + /* Take a reference on the driver that will be kept with stream_fd + * until its release. + */ + drm_dev_get(&dev_priv->drm); + return stream_fd; err_open: @@ -3477,9 +3492,13 @@ void i915_perf_init(struct drm_i915_private *dev_priv) dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set; dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set; - dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128; - dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; - + if (IS_GEN(dev_priv, 10)) { + dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128; + dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; + } else { + dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124; + dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e; + } dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16); } } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index f4ce643b3bc3..cce426b23a24 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -21,24 +21,22 @@ /* watermark/fifo updates */ TRACE_EVENT(intel_pipe_enable, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), - TP_ARGS(dev_priv, pipe), + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), TP_STRUCT__entry( __array(u32, frame, 3) __array(u32, scanline, 3) __field(enum pipe, pipe) ), - TP_fast_assign( - enum pipe _pipe; - for_each_pipe(dev_priv, _pipe) { - __entry->frame[_pipe] = - dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); - __entry->scanline[_pipe] = - intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe)); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc *it__; + for_each_intel_crtc(&dev_priv->drm, it__) { + __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); + __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); } - __entry->pipe = pipe; + __entry->pipe = crtc->pipe; ), TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", @@ -49,8 +47,8 @@ TRACE_EVENT(intel_pipe_enable, ); TRACE_EVENT(intel_pipe_disable, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), - TP_ARGS(dev_priv, pipe), + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), TP_STRUCT__entry( __array(u32, frame, 3) @@ -59,14 +57,13 @@ TRACE_EVENT(intel_pipe_disable, ), TP_fast_assign( - enum pipe _pipe; - for_each_pipe(dev_priv, _pipe) { - __entry->frame[_pipe] = - 
dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); - __entry->scanline[_pipe] = - intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe)); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc *it__; + for_each_intel_crtc(&dev_priv->drm, it__) { + __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); + __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); } - __entry->pipe = pipe; + __entry->pipe = crtc->pipe; ), TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", @@ -89,8 +86,7 @@ TRACE_EVENT(intel_pipe_crc, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); ), @@ -112,9 +108,10 @@ TRACE_EVENT(intel_cpu_fifo_underrun, ), TP_fast_assign( + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); __entry->pipe = pipe; - __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); - __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("pipe %c, frame=%u, scanline=%u", @@ -134,9 +131,10 @@ TRACE_EVENT(intel_pch_fifo_underrun, TP_fast_assign( enum pipe pipe = pch_transcoder; + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); __entry->pipe = pipe; - __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); - __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("pch transcoder %c, frame=%u, scanline=%u", @@ -156,12 +154,10 @@ TRACE_EVENT(intel_memory_cxsr, ), TP_fast_assign( - enum pipe pipe; - for_each_pipe(dev_priv, pipe) { - __entry->frame[pipe] = - dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); - __entry->scanline[pipe] = - intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); + struct intel_crtc *crtc; + for_each_intel_crtc(&dev_priv->drm, crtc) { + __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc); + __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc); } __entry->old = old; __entry->new = new; @@ -198,8 +194,7 @@ TRACE_EVENT(g4x_wm, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; @@ -243,8 +238,7 @@ TRACE_EVENT(vlv_wm, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->level = wm->level; __entry->cxsr = wm->cxsr; @@ -278,8 +272,7 @@ TRACE_EVENT(vlv_fifo_size, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = 
intel_get_crtc_scanline(crtc); __entry->sprite0_start = sprite0_start; __entry->sprite1_start = sprite1_start; @@ -310,8 +303,7 @@ TRACE_EVENT(intel_update_plane, TP_fast_assign( __entry->pipe = crtc->pipe; __entry->name = plane->name; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); @@ -338,8 +330,7 @@ TRACE_EVENT(intel_disable_plane, TP_fast_assign( __entry->pipe = crtc->pipe; __entry->name = plane->name; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), @@ -364,8 +355,7 @@ TRACE_EVENT(i915_pipe_update_start, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->min = crtc->debug.min_vbl; __entry->max = crtc->debug.max_vbl; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 502c54428570..8d1aebc3e857 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -221,13 +221,11 @@ __untrack_all_wakerefs(struct intel_runtime_pm_debug *debug, static void dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug) { - struct drm_printer p; + if (debug->count) { + struct drm_printer p = drm_debug_printer("i915"); - if (!debug->count) - return; - - p = drm_debug_printer("i915"); - __print_intel_runtime_pm_wakeref(&p, debug); + __print_intel_runtime_pm_wakeref(&p, debug); + } kfree(debug->owners); } diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 9cbb2ebf575b..38275310b196 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -66,6 +66,21 @@ intel_wakeref_get(struct intel_runtime_pm *rpm, } /** + * intel_wakeref_get_if_active: Acquire the wakeref + * @wf: the wakeref + * + * Acquire a hold on the wakeref, but only if the wakeref is already + * active. + * + * Returns: true if the wakeref was acquired, false otherwise.
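+ * + * A minimal usage sketch (illustrative only; the argument names follow + * the kerneldoc of intel_wakeref_put() below rather than any one caller): + * + * if (intel_wakeref_get_if_active(wf)) { + * ... short work; the count cannot drop to zero here ... + * intel_wakeref_put(i915, wf, fn); + * }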
+ */ +static inline bool +intel_wakeref_get_if_active(struct intel_wakeref *wf) +{ + return atomic_inc_not_zero(&wf->count); +} + +/** * intel_wakeref_put: Release the wakeref * @i915: the drm_i915_private device * @wf: the wakeref diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 1671db47aa57..e9c55d1d6c04 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: /* copy commands into RB: */ obj = submit->bos[submit->cmd[i].idx].obj; @@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index be39cf01e51e..dc8ec2c94301 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 9acbbc0f3232..048c8be426f3 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, /* ignore if there has not been a ctx switch: */ if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ? CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 0e2f74163a16..0aa8a12c9952 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -2221,8 +2221,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, if (ret) goto fail; - spin_lock_init(&dpu_enc->enc_spinlock); - atomic_set(&dpu_enc->frame_done_timeout_ms, 0); timer_setup(&dpu_enc->frame_done_timer, dpu_encoder_frame_done_timeout, 0); @@ -2276,6 +2274,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs); + spin_lock_init(&dpu_enc->enc_spinlock); dpu_enc->enabled = false; return &dpu_enc->base; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index ff14555372d0..78d5fa230c16 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc, mdp5_crtc->enabled = false; } +static void mdp5_crtc_vblank_on(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_interface *intf = mdp5_cstate->pipeline.intf; + u32 count; + + count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 
0 : 0xffffffff; + drm_crtc_set_max_vblank_count(crtc, count); + + drm_crtc_vblank_on(crtc); +} + static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, } /* Restore vblank irq handling after power is enabled */ - drm_crtc_vblank_on(crtc); + mdp5_crtc_vblank_on(crtc); mdp5_crtc_mode_set_nofb(crtc); @@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc) mdp5_crtc_destroy_state(crtc, crtc->state); __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base); + + drm_crtc_vblank_reset(crtc); } static const struct drm_crtc_funcs mdp5_crtc_funcs = { diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 4a60f5fca6b0..fec6ef1ae3b9 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; dev->driver->get_scanout_position = mdp5_get_scanoutpos; dev->driver->get_vblank_counter = mdp5_get_vblank_counter; - dev->max_vblank_count = 0xffffffff; + dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */ dev->vblank_disable_immediate = true; return kms; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index ab64ab470de7..c356f5ccf253 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -619,7 +619,7 @@ static int context_init(struct drm_device *dev, struct drm_file *file) msm_submitqueue_init(dev, ctx); - ctx->aspace = priv->gpu->aspace; + ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL; file->driver_priv = ctx; return 0; @@ -1279,7 +1279,8 @@ static int add_gpu_components(struct device *dev, if (!np) return 0; - drm_of_component_match_add(dev, matchptr, compare_of, np); + if (of_device_is_available(np)) + drm_of_component_match_add(dev, matchptr, compare_of, np); of_node_put(np); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 8b78554cfde3..8cf6362e64bf 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj) return !msm_obj->vram_node; } +/* + * Cache sync.. this is a bit over-complicated, to fit dma-mapping + * API. Really GPU cache is out of scope here (handled on cmdstream) + * and all we need to do is invalidate newly allocated pages before + * mapping to CPU as uncached/writecombine. + * + * On top of this, we have the added headache, that depending on + * display generation, the display's iommu may be wired up to either + * the toplevel drm device (mdss), or to the mdp sub-node, meaning + * that here we either have dma-direct or iommu ops. + * + * Let this be a cautionary tale of abstraction gone wrong.
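+ * + * Concretely, the helpers below key off get_dma_ops(dev): when real + * dma_map_ops are installed (the iommu case) a plain + * dma_sync_sg_for_device()/dma_sync_sg_for_cpu() is sufficient, while + * in the dma-direct case we (ab)use dma_map_sg()/dma_unmap_sg() to get + * the same cache maintenance.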
+ */ + +static void sync_for_device(struct msm_gem_object *msm_obj) +{ + struct device *dev = msm_obj->base.dev->dev; + + if (get_dma_ops(dev)) { + dma_sync_sg_for_device(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } else { + dma_map_sg(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } +} + +static void sync_for_cpu(struct msm_gem_object *msm_obj) +{ + struct device *dev = msm_obj->base.dev->dev; + + if (get_dma_ops(dev)) { + dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sg(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } +} + /* allocate pages from VRAM carveout, used when no IOMMU: */ static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) { @@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj) * because display controller, GPU, etc. are not coherent: */ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_map_sg(dev->dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + sync_for_device(msm_obj); } return msm_obj->pages; @@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj) * GPU, etc. are not coherent: */ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, - DMA_BIDIRECTIONAL); + sync_for_cpu(msm_obj); sg_free_table(msm_obj->sgt); kfree(msm_obj->sgt); diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 8497768f1b41..126703816794 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -780,7 +780,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, connector->display_info.bpc * 3); - if (drm_atomic_crtc_needs_modeset(crtc_state)) { + if (crtc_state->mode_changed) { slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn); diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 8c92374afcf2..a835cebb6d90 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -475,6 +475,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm, fault->inst, fault->addr, fault->access); } +static inline bool +nouveau_range_done(struct hmm_range *range) +{ + bool ret = hmm_range_valid(range); + + hmm_range_unregister(range); + return ret; +} + +static int +nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range) +{ + long ret; + + range->default_flags = 0; + range->pfn_flags_mask = -1UL; + + ret = hmm_range_register(range, mirror, + range->start, range->end, + PAGE_SHIFT); + if (ret) { + up_read(&range->vma->vm_mm->mmap_sem); + return (int)ret; + } + + if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) { + up_read(&range->vma->vm_mm->mmap_sem); + return -EAGAIN; + } + + ret = hmm_range_fault(range, true); + if (ret <= 0) { + if (ret == 0) + ret = -EBUSY; + up_read(&range->vma->vm_mm->mmap_sem); + hmm_range_unregister(range); + return ret; + } + return 0; +} + static int nouveau_svm_fault(struct nvif_notify *notify) { @@ -649,10 +690,10 @@ nouveau_svm_fault(struct nvif_notify *notify) range.values = nouveau_svm_pfn_values; range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT; again: - ret = hmm_vma_fault(&svmm->mirror, &range, true); + ret = nouveau_range_fault(&svmm->mirror, &range); if (ret == 0) { mutex_lock(&svmm->mutex); - if 
(!hmm_vma_range_done(&range)) { + if (!nouveau_range_done(&range)) { mutex_unlock(&svmm->mutex); goto again; } @@ -666,8 +707,8 @@ again: NULL); svmm->vmm->vmm.object.client->super = false; mutex_unlock(&svmm->mutex); + up_read(&svmm->mm->mmap_sem); } - up_read(&svmm->mm->mmap_sem); /* Cancel any faults in the window whose pages didn't manage * to keep their valid bit, or stay writeable when required. diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index d594f7520b7b..7d78e6deac89 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -285,9 +285,13 @@ static int ttm_set_pages_caching(struct dma_pool *pool, static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page) { + unsigned long attrs = 0; dma_addr_t dma = d_page->dma; d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL; - dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma); + if (pool->type & IS_HUGE) + attrs = DMA_ATTR_NO_WARN; + + dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs); kfree(d_page); d_page = NULL; diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index e7dff5febe16..d42bc0883a32 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -852,7 +852,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 }; static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 }; static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 }; static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a }; -static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c }; +static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b }; static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c }; static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d }; @@ -3764,6 +3764,7 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME; data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME; data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME; + data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H; data->REG_PWM[0] = NCT6106_REG_PWM; data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT; data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT; diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index a7d2b16dd702..30e18eb60da7 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -408,8 +408,10 @@ static ssize_t occ_show_power_1(struct device *dev, static u64 occ_get_powr_avg(u64 *accum, u32 *samples) { - return div64_u64(get_unaligned_be64(accum) * 1000000ULL, - get_unaligned_be32(samples)); + u64 divisor = get_unaligned_be32(samples); + + return (divisor == 0) ? 
0 : + div64_u64(get_unaligned_be64(accum) * 1000000ULL, divisor); } static ssize_t occ_show_power_2(struct device *dev, diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c index 8d55cdd69ff4..435c7d7377a3 100644 --- a/drivers/i2c/busses/i2c-at91-core.c +++ b/drivers/i2c/busses/i2c-at91-core.c @@ -142,7 +142,7 @@ static struct at91_twi_pdata sama5d4_config = { static struct at91_twi_pdata sama5d2_config = { .clk_max_div = 7, - .clk_offset = 4, + .clk_offset = 3, .has_unre_flag = true, .has_alt_cmd = true, .has_hold_field = true, diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c index e87232f2e708..a3fcc35ffd3b 100644 --- a/drivers/i2c/busses/i2c-at91-master.c +++ b/drivers/i2c/busses/i2c-at91-master.c @@ -122,9 +122,11 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev) writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR); /* send stop when last byte has been written */ - if (--dev->buf_len == 0) + if (--dev->buf_len == 0) { if (!dev->use_alt_cmd) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); + at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY); + } dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len); @@ -542,9 +544,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) } else { at91_twi_write_next_byte(dev); at91_twi_write(dev, AT91_TWI_IER, - AT91_TWI_TXCOMP | - AT91_TWI_NACK | - AT91_TWI_TXRDY); + AT91_TWI_TXCOMP | AT91_TWI_NACK | + (dev->buf_len ? AT91_TWI_TXRDY : 0)); } } diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 2c7f145a036e..d7fd76baec92 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -392,16 +392,18 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c, static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c) { struct i2c_msg *msg = iproc_i2c->msg; + uint32_t val; /* Read valid data from RX FIFO */ while (iproc_i2c->rx_bytes < msg->len) { - if (!((iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET) >> M_FIFO_RX_CNT_SHIFT) - & M_FIFO_RX_CNT_MASK)) + val = iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET); + + /* rx fifo empty */ + if (!((val >> M_RX_STATUS_SHIFT) & M_RX_STATUS_MASK)) break; msg->buf[iproc_i2c->rx_bytes] = - (iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET) >> - M_RX_DATA_SHIFT) & M_RX_DATA_MASK; + (val >> M_RX_DATA_SHIFT) & M_RX_DATA_MASK; iproc_i2c->rx_bytes++; } } diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c index cfc76b5de726..5a1235fd86bb 100644 --- a/drivers/i2c/busses/i2c-nvidia-gpu.c +++ b/drivers/i2c/busses/i2c-nvidia-gpu.c @@ -364,7 +364,7 @@ static void gpu_i2c_remove(struct pci_dev *pdev) /* * We need gpu_i2c_suspend() even if it is stub, for runtime pm to work * correctly. Without it, lspci shows runtime pm status as "D0" for the card. - * Documentation/power/pci.txt also insists for driver to provide this. + * Documentation/power/pci.rst also insists for driver to provide this. 
*/ static __maybe_unused int gpu_i2c_suspend(struct device *dev) { diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index d97fb857b0ea..c98ef4c4a0c9 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -435,6 +435,7 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat) * fall through to the write state, as we will need to * send a byte as well */ + /* Fall through */ case STATE_WRITE: /* diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 888d89ce81df..beee7b7e0d9a 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -302,7 +302,9 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, struct ib_udata *udata, struct ib_uobject *uobj) { + enum ib_qp_type qp_type = attr->qp_type; struct ib_qp *qp; + bool is_xrc; if (!dev->ops.create_qp) return ERR_PTR(-EOPNOTSUPP); @@ -320,7 +322,8 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, * and more importantly they are created internaly by driver, * see mlx5 create_dev_resources() as an example. */ - if (attr->qp_type < IB_QPT_XRC_INI) { + is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT; + if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) { qp->res.type = RDMA_RESTRACK_QP; if (uobj) rdma_restrack_uadd(&qp->res); diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index 01faef7bc061..45d5164e9574 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -393,6 +393,9 @@ u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index) u64 sum; port_counter = &dev->port_data[port].port_counter; + if (!port_counter->hstats) + return 0; + sum = get_running_counters_hwstat_sum(dev, port, index); sum += port_counter->hstats->value[index]; @@ -594,7 +597,7 @@ void rdma_counter_init(struct ib_device *dev) struct rdma_port_counter *port_counter; u32 port; - if (!dev->ops.alloc_hw_stats || !dev->port_data) + if (!dev->port_data) return; rdma_for_each_port(dev, port) { @@ -602,6 +605,9 @@ void rdma_counter_init(struct ib_device *dev) port_counter->mode.mode = RDMA_COUNTER_MODE_NONE; mutex_init(&port_counter->lock); + if (!dev->ops.alloc_hw_stats) + continue; + port_counter->hstats = dev->ops.alloc_hw_stats(dev, port); if (!port_counter->hstats) goto fail; @@ -624,9 +630,6 @@ void rdma_counter_release(struct ib_device *dev) struct rdma_port_counter *port_counter; u32 port; - if (!dev->ops.alloc_hw_stats) - return; - rdma_for_each_port(dev, port) { port_counter = &dev->port_data[port].port_counter; kfree(port_counter->hstats); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 9773145dee09..ea8661a00651 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -94,11 +94,17 @@ static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC); static DECLARE_RWSEM(devices_rwsem); #define DEVICE_REGISTERED XA_MARK_1 -static LIST_HEAD(client_list); +static u32 highest_client_id; #define CLIENT_REGISTERED XA_MARK_1 static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC); static DECLARE_RWSEM(clients_rwsem); +static void ib_client_put(struct ib_client *client) +{ + if (refcount_dec_and_test(&client->uses)) + complete(&client->uses_zero); +} + /* * If client_data is registered then the corresponding client must also still * be registered. 
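 * * The uses/uses_zero pair introduced above is the usual * refcount-plus-completion teardown idiom: every successful * refcount_inc_not_zero(&client->uses) is balanced by an * ib_client_put(), and ib_unregister_client() parks in * * wait_for_completion(&client->uses_zero); * * which only the final put can complete.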
@@ -661,6 +667,14 @@ static int add_client_context(struct ib_device *device, down_write(&device->client_data_rwsem); /* + * So long as the client is registered hold both the client and device + * unregistration locks. + */ + if (!refcount_inc_not_zero(&client->uses)) + goto out_unlock; + refcount_inc(&device->refcount); + + /* * Another caller to add_client_context got here first and has already * completely initialized context. */ @@ -683,6 +697,9 @@ static int add_client_context(struct ib_device *device, return 0; out: + ib_device_put(device); + ib_client_put(client); +out_unlock: up_write(&device->client_data_rwsem); return ret; } @@ -702,7 +719,7 @@ static void remove_client_context(struct ib_device *device, client_data = xa_load(&device->client_data, client_id); xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED); client = xa_load(&clients, client_id); - downgrade_write(&device->client_data_rwsem); + up_write(&device->client_data_rwsem); /* * Notice we cannot be holding any exclusive locks when calling the @@ -712,17 +729,13 @@ static void remove_client_context(struct ib_device *device, * * For this reason clients and drivers should not call the * unregistration functions while holding any locks. - * - * It tempting to drop the client_data_rwsem too, but this is required - * to ensure that unregister_client does not return until all clients - * are completely unregistered, which is required to avoid module - * unloading races. */ if (client->remove) client->remove(device, client_data); xa_erase(&device->client_data, client_id); - up_read(&device->client_data_rwsem); + ib_device_put(device); + ib_client_put(client); } static int alloc_port_data(struct ib_device *device) @@ -1224,7 +1237,7 @@ static int setup_device(struct ib_device *device) static void disable_device(struct ib_device *device) { - struct ib_client *client; + u32 cid; WARN_ON(!refcount_read(&device->refcount)); @@ -1232,10 +1245,19 @@ static void disable_device(struct ib_device *device) xa_clear_mark(&devices, device->index, DEVICE_REGISTERED); up_write(&devices_rwsem); + /* + * Remove clients in LIFO order, see assign_client_id. This could be + * more efficient if xarray learns to reverse iterate. Since no new + * clients can be added to this ib_device past this point we only need + * the maximum possible client_id value here. + */ down_read(&clients_rwsem); - list_for_each_entry_reverse(client, &client_list, list) - remove_client_context(device, client->client_id); + cid = highest_client_id; up_read(&clients_rwsem); + while (cid) { + cid--; + remove_client_context(device, cid); + } /* Pairs with refcount_set in enable_device */ ib_device_put(device); @@ -1662,30 +1684,31 @@ static int assign_client_id(struct ib_client *client) /* * The add/remove callbacks must be called in FIFO/LIFO order. To * achieve this we assign client_ids so they are sorted in - * registration order, and retain a linked list we can reverse iterate - * to get the LIFO order. The extra linked list can go away if xarray - * learns to reverse iterate. + * registration order.
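+ * The highest_client_id cursor doubles as the next id to hand out; + * remove_client_id() walks it back down over freed slots, which is what + * lets disable_device() tear clients down with a plain decrementing + * loop.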
*/ - if (list_empty(&client_list)) { - client->client_id = 0; - } else { - struct ib_client *last; - - last = list_last_entry(&client_list, struct ib_client, list); - client->client_id = last->client_id + 1; - } + client->client_id = highest_client_id; ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL); if (ret) goto out; + highest_client_id++; xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED); - list_add_tail(&client->list, &client_list); out: up_write(&clients_rwsem); return ret; } +static void remove_client_id(struct ib_client *client) +{ + down_write(&clients_rwsem); + xa_erase(&clients, client->client_id); + for (; highest_client_id; highest_client_id--) + if (xa_load(&clients, highest_client_id - 1)) + break; + up_write(&clients_rwsem); +} + /** * ib_register_client - Register an IB client * @client:Client to register @@ -1705,6 +1728,8 @@ int ib_register_client(struct ib_client *client) unsigned long index; int ret; + refcount_set(&client->uses, 1); + init_completion(&client->uses_zero); ret = assign_client_id(client); if (ret) return ret; @@ -1740,21 +1765,30 @@ void ib_unregister_client(struct ib_client *client) unsigned long index; down_write(&clients_rwsem); + ib_client_put(client); xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED); up_write(&clients_rwsem); - /* - * Every device still known must be serialized to make sure we are - * done with the client callbacks before we return. - */ - down_read(&devices_rwsem); - xa_for_each (&devices, index, device) + + /* We do not want to have locks while calling client->remove() */ + rcu_read_lock(); + xa_for_each (&devices, index, device) { + if (!ib_device_try_get(device)) + continue; + rcu_read_unlock(); + remove_client_context(device, client->client_id); - up_read(&devices_rwsem); - down_write(&clients_rwsem); - list_del(&client->list); - xa_erase(&clients, client->client_id); - up_write(&clients_rwsem); + ib_device_put(device); + rcu_read_lock(); + } + rcu_read_unlock(); + + /* + * remove_client_context() is not a fence, it can return even though a + * removal is ongoing. Wait until all removals are completed. 
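+ * Each remove_client_context() drops one uses reference through + * ib_client_put(), so the wait below returns only once every in-flight + * ->remove() callback has run to completion.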
+ */ + wait_for_completion(&client->uses_zero); + remove_client_id(client); } EXPORT_SYMBOL(ib_unregister_client); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index cc99479b2c09..9947d16edef2 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -3224,18 +3224,18 @@ static int ib_mad_port_open(struct ib_device *device, if (has_smi) cq_size *= 2; + port_priv->pd = ib_alloc_pd(device, 0); + if (IS_ERR(port_priv->pd)) { + dev_err(&device->dev, "Couldn't create ib_mad PD\n"); + ret = PTR_ERR(port_priv->pd); + goto error3; + } + port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, IB_POLL_UNBOUND_WORKQUEUE); if (IS_ERR(port_priv->cq)) { dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); ret = PTR_ERR(port_priv->cq); - goto error3; - } - - port_priv->pd = ib_alloc_pd(device, 0); - if (IS_ERR(port_priv->pd)) { - dev_err(&device->dev, "Couldn't create ib_mad PD\n"); - ret = PTR_ERR(port_priv->pd); goto error4; } @@ -3278,11 +3278,11 @@ error8: error7: destroy_mad_qp(&port_priv->qp_info[0]); error6: - ib_dealloc_pd(port_priv->pd); -error4: ib_free_cq(port_priv->cq); cleanup_recv_queue(&port_priv->qp_info[1]); cleanup_recv_queue(&port_priv->qp_info[0]); +error4: + ib_dealloc_pd(port_priv->pd); error3: kfree(port_priv); @@ -3312,8 +3312,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num) destroy_workqueue(port_priv->wq); destroy_mad_qp(&port_priv->qp_info[1]); destroy_mad_qp(&port_priv->qp_info[0]); - ib_dealloc_pd(port_priv->pd); ib_free_cq(port_priv->cq); + ib_dealloc_pd(port_priv->pd); cleanup_recv_queue(&port_priv->qp_info[1]); cleanup_recv_queue(&port_priv->qp_info[0]); /* XXX: Handle deallocation of MAD registration tables */ diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 9f8a48016b41..ffdeaf6e0b68 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -49,6 +49,7 @@ #include <linux/sched.h> #include <linux/semaphore.h> #include <linux/slab.h> +#include <linux/nospec.h> #include <linux/uaccess.h> @@ -884,11 +885,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg) if (get_user(id, arg)) return -EFAULT; + if (id >= IB_UMAD_MAX_AGENTS) + return -EINVAL; mutex_lock(&file->port->file_mutex); mutex_lock(&file->mutex); - if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) { + id = array_index_nospec(id, IB_UMAD_MAX_AGENTS); + if (!__get_agent(file, id)) { ret = -EINVAL; goto out; } diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index a91653aabf38..098ab883733e 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -308,6 +308,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev); struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; struct bnxt_qplib_gid *gid_to_del; + u16 vlan_id = 0xFFFF; /* Delete the entry from the hardware */ ctx = *context; @@ -317,7 +318,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) if (sgid_tbl && sgid_tbl->active) { if (ctx->idx >= sgid_tbl->max) return -EINVAL; - gid_to_del = &sgid_tbl->tbl[ctx->idx]; + gid_to_del = &sgid_tbl->tbl[ctx->idx].gid; + vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id; /* DEL_GID is called in WQ context(netdevice_event_work_handler) * or via the ib_unregister_device path. 
In the former case QP1 * may not be destroyed yet, in which case just return as FW @@ -335,7 +337,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) } ctx->refcnt--; if (!ctx->refcnt) { - rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true); + rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, + vlan_id, true); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to remove GID: %#x", rc); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 37928b1111df..bdbde8e22420 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -488,7 +488,7 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res, struct bnxt_qplib_sgid_tbl *sgid_tbl, u16 max) { - sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL); + sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL); if (!sgid_tbl->tbl) return -ENOMEM; @@ -526,9 +526,10 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res, for (i = 0; i < sgid_tbl->max; i++) { if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero, sizeof(bnxt_qplib_gid_zero))) - bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true); + bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid, + sgid_tbl->tbl[i].vlan_id, true); } - memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max); + memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max); memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max); sgid_tbl->active = 0; @@ -537,7 +538,11 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res, static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl, struct net_device *netdev) { - memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max); + u32 i; + + for (i = 0; i < sgid_tbl->max; i++) + sgid_tbl->tbl[i].vlan_id = 0xffff; + memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index 30c42c92fac7..fbda11a7ab1a 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -111,7 +111,7 @@ struct bnxt_qplib_pd_tbl { }; struct bnxt_qplib_sgid_tbl { - struct bnxt_qplib_gid *tbl; + struct bnxt_qplib_gid_info *tbl; u16 *hw_id; u16 max; u16 active; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 48793d3512ac..40296b97d21e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -213,12 +213,12 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res, index, sgid_tbl->max); return -EINVAL; } - memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid)); + memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid)); return 0; } int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, - struct bnxt_qplib_gid *gid, bool update) + struct bnxt_qplib_gid *gid, u16 vlan_id, bool update) { struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl, struct bnxt_qplib_res, @@ -236,7 +236,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, return -ENOMEM; } for (index = 0; index < sgid_tbl->max; index++) { - if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid))) + if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) && + vlan_id == sgid_tbl->tbl[index].vlan_id) break; } if (index == sgid_tbl->max) { @@ -262,8 +263,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, 
if (rc) return rc; } - memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, + memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero, sizeof(bnxt_qplib_gid_zero)); + sgid_tbl->tbl[index].vlan_id = 0xFFFF; sgid_tbl->vlan[index] = 0; sgid_tbl->active--; dev_dbg(&res->pdev->dev, @@ -296,7 +298,8 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, } free_idx = sgid_tbl->max; for (i = 0; i < sgid_tbl->max; i++) { - if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) { + if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) && + sgid_tbl->tbl[i].vlan_id == vlan_id) { dev_dbg(&res->pdev->dev, "SGID entry already exist in entry %d!\n", i); *index = i; @@ -351,6 +354,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, } /* Add GID to the sgid_tbl */ memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); + sgid_tbl->tbl[free_idx].vlan_id = vlan_id; sgid_tbl->active++; if (vlan_id != 0xFFFF) sgid_tbl->vlan[free_idx] = 1; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 0ec3b12b0bcd..13d9432d5ce2 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -84,6 +84,11 @@ struct bnxt_qplib_gid { u8 data[16]; }; +struct bnxt_qplib_gid_info { + struct bnxt_qplib_gid gid; + u16 vlan_id; +}; + struct bnxt_qplib_ah { struct bnxt_qplib_gid dgid; struct bnxt_qplib_pd *pd; @@ -221,7 +226,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res, struct bnxt_qplib_sgid_tbl *sgid_tbl, int index, struct bnxt_qplib_gid *gid); int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, - struct bnxt_qplib_gid *gid, bool update); + struct bnxt_qplib_gid *gid, u16 vlan_id, bool update); int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id, bool update, u32 *index); diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index d5b643a1d9fd..67052dc3100c 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -14452,7 +14452,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd) clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); } -static void init_rxe(struct hfi1_devdata *dd) +static int init_rxe(struct hfi1_devdata *dd) { struct rsm_map_table *rmt; u64 val; @@ -14461,6 +14461,9 @@ static void init_rxe(struct hfi1_devdata *dd) write_csr(dd, RCV_ERR_MASK, ~0ull); rmt = alloc_rsm_map_table(dd); + if (!rmt) + return -ENOMEM; + /* set up QOS, including the QPN map table */ init_qos(dd, rmt); init_fecn_handling(dd, rmt); @@ -14487,6 +14490,7 @@ static void init_rxe(struct hfi1_devdata *dd) val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) << RCV_BYPASS_HDR_SIZE_SHIFT); write_csr(dd, RCV_BYPASS, val); + return 0; } static void init_other(struct hfi1_devdata *dd) @@ -15024,7 +15028,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd) goto bail_cleanup; /* set initial RXE CSRs */ - init_rxe(dd); + ret = init_rxe(dd); + if (ret) + goto bail_cleanup; + /* set initial TXE CSRs */ init_txe(dd); /* set initial non-RXE, non-TXE CSRs */ diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 0477c14633ab..024a7c2b6124 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -1835,7 +1835,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah) cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) break; trdma_clean_swqe(qp, wqe); - rvt_qp_wqe_unreserve(qp, wqe); trace_hfi1_qp_send_completion(qp, wqe, qp->s_last); 
rvt_qp_complete_swqe(qp, wqe, @@ -1882,7 +1881,6 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 || cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { trdma_clean_swqe(qp, wqe); - rvt_qp_wqe_unreserve(qp, wqe); trace_hfi1_qp_send_completion(qp, wqe, qp->s_last); rvt_qp_complete_swqe(qp, wqe, diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index 92acccaaaa86..996fc298207e 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -1620,6 +1620,7 @@ static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req, flows[i].req = req; flows[i].npagesets = 0; flows[i].pagesets[0].mapped = 0; + flows[i].resync_npkts = 0; } req->flows = flows; return 0; @@ -1673,34 +1674,6 @@ static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req, return NULL; } -static struct tid_rdma_flow * -__find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail, - u32 psn, u16 *fidx) -{ - for ( ; CIRC_CNT(head, tail, MAX_FLOWS); - tail = CIRC_NEXT(tail, MAX_FLOWS)) { - struct tid_rdma_flow *flow = &req->flows[tail]; - u32 spsn, lpsn; - - spsn = full_flow_psn(flow, flow->flow_state.spsn); - lpsn = full_flow_psn(flow, flow->flow_state.lpsn); - - if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) { - if (fidx) - *fidx = tail; - return flow; - } - } - return NULL; -} - -static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req, - u32 psn, u16 *fidx) -{ - return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn, - fidx); -} - /* TID RDMA READ functions */ u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u32 *bth1, @@ -2788,19 +2761,7 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, * to prevent continuous Flow Sequence errors for any * packets that could be still in the fabric. */ - flow = find_flow(req, psn, NULL); - if (!flow) { - /* - * We can't find the IB PSN matching the - * received KDETH PSN. The only thing we can - * do at this point is report the error to - * the QP. 
- */ - hfi1_kern_read_tid_flow_free(qp); - spin_unlock(&qp->s_lock); - rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); - return ret; - } + flow = &req->flows[req->clear_tail]; if (priv->s_flags & HFI1_R_TID_SW_PSN) { diff = cmp_psn(psn, flow->flow_state.r_next_psn); diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index c4b243f50c76..646f61545ed6 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -54,6 +54,7 @@ #include <linux/mm.h> #include <linux/vmalloc.h> #include <rdma/opa_addr.h> +#include <linux/nospec.h> #include "hfi.h" #include "common.h" @@ -1536,6 +1537,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr) sl = rdma_ah_get_sl(ah_attr); if (sl >= ARRAY_SIZE(ibp->sl_to_sc)) return -EINVAL; + sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc)); sc5 = ibp->sl_to_sc[sl]; if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig index 8bf847bcd8d3..54782197c717 100644 --- a/drivers/infiniband/hw/hns/Kconfig +++ b/drivers/infiniband/hw/hns/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only config INFINIBAND_HNS - tristate "HNS RoCE Driver" + bool "HNS RoCE Driver" depends on NET_VENDOR_HISILICON depends on ARM64 || (COMPILE_TEST && 64BIT) ---help--- @@ -11,7 +11,7 @@ config INFINIBAND_HNS To compile HIP06 or HIP08 driver as module, choose M here. config INFINIBAND_HNS_HIP06 - bool "Hisilicon Hip06 Family RoCE support" + tristate "Hisilicon Hip06 Family RoCE support" depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET ---help--- RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and @@ -21,7 +21,7 @@ config INFINIBAND_HNS_HIP06 module will be called hns-roce-hw-v1 config INFINIBAND_HNS_HIP08 - bool "Hisilicon Hip08 Family RoCE support" + tristate "Hisilicon Hip08 Family RoCE support" depends on INFINIBAND_HNS && PCI && HNS3 ---help--- RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC. 
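The hfi1_check_ah() hunk above adds array_index_nospec() after the bounds check so that a mispredicted branch cannot speculatively index sl_to_sc[] out of range (Spectre v1). A standalone approximation of the branch-free clamp used by the kernel's generic helper; the in-kernel version additionally hides the index from the optimizer:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Branch-free clamp modeled on the kernel's generic
     * array_index_mask_nospec(): ~0UL when index < size, 0 otherwise.
     * Relies on arithmetic right shift of signed values (gcc/clang
     * behavior) and on index and size being below LONG_MAX.
     */
    static unsigned long index_mask(unsigned long index, unsigned long size)
    {
        return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
    }

    int main(void)
    {
        const unsigned long size = 32;      /* e.g. ARRAY_SIZE(sl_to_sc) */
        unsigned long idx[] = { 0, 31, 32, 1000 };

        for (int i = 0; i < 4; i++) {
            unsigned long safe = idx[i] & index_mask(idx[i], size);
            printf("index %4lu -> %lu\n", idx[i], safe);
        }
        return 0;
    }

In-range indices pass through unchanged; out-of-range ones collapse to 0 without a data-dependent branch the CPU could mispredict.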
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile index e105945b94a1..449a2d81319d 100644 --- a/drivers/infiniband/hw/hns/Makefile +++ b/drivers/infiniband/hw/hns/Makefile @@ -9,12 +9,8 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \ hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \ hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o -ifdef CONFIG_INFINIBAND_HNS_HIP06 hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs) -obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o -endif +obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o -ifdef CONFIG_INFINIBAND_HNS_HIP08 hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs) -obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o -endif +obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c index 627aa46ef683..c00714c2f16a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_db.c +++ b/drivers/infiniband/hw/hns/hns_roce_db.c @@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, struct ib_udata *udata, unsigned long virt, struct hns_roce_db *db) { + unsigned long page_addr = virt & PAGE_MASK; struct hns_roce_user_db_page *page; + unsigned int offset; int ret = 0; mutex_lock(&context->page_mutex); list_for_each_entry(page, &context->page_list, list) - if (page->user_virt == (virt & PAGE_MASK)) + if (page->user_virt == page_addr) goto found; page = kmalloc(sizeof(*page), GFP_KERNEL); @@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, } refcount_set(&page->refcount, 1); - page->user_virt = (virt & PAGE_MASK); - page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0); + page->user_virt = page_addr; + page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0); if (IS_ERR(page->umem)) { ret = PTR_ERR(page->umem); kfree(page); @@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, list_add(&page->list, &context->page_list); found: - db->dma = sg_dma_address(page->umem->sg_head.sgl) + - (virt & ~PAGE_MASK); - page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK; - db->virt_addr = sg_virt(page->umem->sg_head.sgl); + offset = virt - page_addr; + db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset; + db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset; db->u.user_page = page; refcount_inc(&page->refcount); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 81e6dedb1e02..c07e387a07a3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -750,8 +750,10 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0); pd = rdma_zalloc_drv_obj(ibdev, ib_pd); - if (!pd) + if (!pd) { + ret = -ENOMEM; goto alloc_mem_failed; + } pd->device = ibdev; ret = hns_roce_alloc_pd(pd, NULL); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index c2a5780cb394..e12a4404096b 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -5802,13 +5802,12 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, return; } - if (mpi->mdev_events.notifier_call) - mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events); - mpi->mdev_events.notifier_call = NULL; - mpi->ibdev = NULL; spin_unlock(&port->mp.mpi_lock); + if (mpi->mdev_events.notifier_call) + 
mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events); + mpi->mdev_events.notifier_call = NULL; mlx5_remove_netdev_notifier(ibdev, port_num); spin_lock(&port->mp.mpi_lock); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index c482f19958b3..f6a53455bf8b 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -481,6 +481,7 @@ struct mlx5_umr_wr { u64 length; int access_flags; u32 mkey; + u8 ignore_free_state:1; }; static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr) diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 20ece6e0b2fc..b74fad08412f 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); static int mr_cache_max_order(struct mlx5_ib_dev *dev); static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); -static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev) -{ - return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled); -} static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev) { return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled); } -static bool use_umr(struct mlx5_ib_dev *dev, int order) -{ - return order <= mr_cache_max_order(dev) && - umr_can_modify_entity_size(dev); -} - static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); @@ -545,13 +535,16 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) return; c = order2idx(dev, mr->order); - if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { - mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c); - return; - } + WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES); - if (unreg_umr(dev, mr)) + if (unreg_umr(dev, mr)) { + mr->allocated_from_cache = false; + destroy_mkey(dev, mr); + ent = &cache->ent[c]; + if (ent->cur < ent->limit) + queue_work(cache->wq, &ent->work); return; + } ent = &cache->ent[c]; spin_lock_irq(&ent->lock); @@ -1268,7 +1261,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_ib_mr *mr = NULL; - bool populate_mtts = false; + bool use_umr; struct ib_umem *umem; int page_shift; int npages; @@ -1300,29 +1293,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (err < 0) return ERR_PTR(err); - if (use_umr(dev, order)) { + use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) && + (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) || + !MLX5_CAP_GEN(dev->mdev, atomic)); + + if (order <= mr_cache_max_order(dev) && use_umr) { mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, page_shift, order, access_flags); if (PTR_ERR(mr) == -EAGAIN) { mlx5_ib_dbg(dev, "cache empty for order %d\n", order); mr = NULL; } - populate_mtts = false; } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { if (access_flags & IB_ACCESS_ON_DEMAND) { err = -EINVAL; pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n"); goto error; } - populate_mtts = true; + use_umr = false; } if (!mr) { - if (!umr_can_modify_entity_size(dev)) - populate_mtts = true; mutex_lock(&dev->slow_path_mutex); mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, - page_shift, access_flags, populate_mtts); + page_shift, access_flags, 
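The mlx5_ib_unbind_slave_port() hunk above moves mlx5_notifier_unregister() out from under mp.mpi_lock, because unregistering may block and a spinlock holder must not sleep. A toy pthread sketch of the shape of that fix, with invented names; the driver itself clears the callback after the call rather than detaching under the lock:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    struct notifier { int registered; };

    static pthread_spinlock_t mpi_lock;
    static struct notifier *active;

    /* Stands in for mlx5_notifier_unregister(), which may sleep. */
    static void blocking_unregister(struct notifier *n)
    {
        usleep(1000);
        n->registered = 0;
    }

    static void unbind(void)
    {
        struct notifier *n;

        pthread_spin_lock(&mpi_lock);
        n = active;                 /* detach the pointer under the lock */
        active = NULL;
        pthread_spin_unlock(&mpi_lock);

        if (n)
            blocking_unregister(n); /* sleep only after unlocking */
    }

    int main(void)
    {
        static struct notifier n = { .registered = 1 };

        pthread_spin_init(&mpi_lock, PTHREAD_PROCESS_PRIVATE);
        active = &n;
        unbind();
        printf("registered = %d\n", n.registered);
        return 0;
    }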
!use_umr); mutex_unlock(&dev->slow_path_mutex); } @@ -1338,7 +1332,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, update_odp_mr(mr); - if (!populate_mtts) { + if (use_umr) { int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE; if (access_flags & IB_ACCESS_ON_DEMAND) @@ -1373,9 +1367,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) return 0; umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | - MLX5_IB_SEND_UMR_FAIL_IF_FREE; + MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; umrwr.wr.opcode = MLX5_IB_WR_UMR; + umrwr.pd = dev->umrc.pd; umrwr.mkey = mr->mmkey.key; + umrwr.ignore_free_state = 1; return mlx5_ib_post_send_wait(dev, &umrwr); } @@ -1577,10 +1573,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) mr->sig = NULL; } - mlx5_free_priv_descs(mr); - - if (!allocated_from_cache) + if (!allocated_from_cache) { destroy_mkey(dev, mr); + mlx5_free_priv_descs(mr); + } } static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 5b642d81e617..81da82050d05 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -246,7 +246,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start, * overwrite the same MTTs. Concurent invalidations might race us, * but they will write 0s as well, so no difference in the end result. */ - + mutex_lock(&umem_odp->umem_mutex); for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) { idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; /* @@ -278,6 +278,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start, idx - blk_start_idx + 1, 0, MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC); + mutex_unlock(&umem_odp->umem_mutex); /* * We are now sure that the device will not access the * memory. 
We can safely unmap it, and mark it as dirty if @@ -1771,7 +1772,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work) num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list, w->num_sge, 0); - kfree(w); + kvfree(w); } int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, @@ -1813,7 +1814,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, if (valid_req) queue_work(system_unbound_wq, &work->work); else - kfree(work); + kvfree(work); srcu_read_unlock(&dev->mr_srcu, srcu_key); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 2a97619ed603..379328b2598f 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1713,7 +1713,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, } MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ); - MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); memcpy(rss_key, ucmd.rx_hash_key, len); break; } @@ -4295,10 +4294,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev, memset(umr, 0, sizeof(*umr)); - if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) - umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */ - else - umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */ + if (!umrwr->ignore_free_state) { + if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) + /* fail if free */ + umr->flags = MLX5_UMR_CHECK_FREE; + else + /* fail if not free */ + umr->flags = MLX5_UMR_CHECK_NOT_FREE; + } umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size)); if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) { diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 533157a2a3be..f97b3d65b30c 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -125,14 +125,20 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, struct qedr_dev *dev = rdma_device_to_drv_device(device, struct qedr_dev, ibdev); - return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor); + return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver); } static DEVICE_ATTR_RO(hw_rev); static ssize_t hca_type_show(struct device *device, struct device_attribute *attr, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET"); + struct qedr_dev *dev = + rdma_device_to_drv_device(device, struct qedr_dev, ibdev); + + return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n", + dev->pdev->device, + rdma_protocol_iwarp(&dev->ibdev, 1) ? 
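The odp.c hunks above switch kfree() to kvfree() for the prefetch work item because its allocation (made with kvzalloc() elsewhere in the file, not shown in these hunks) may come from either the slab allocator or vmalloc, and only kvfree() dispatches to the matching release path. A self-contained toy analog of why the free must know which backend produced the pointer:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    #define BIG (64 * 1024)

    struct hdr { size_t total; };

    /* Toy kvmalloc(): small requests from malloc(), big ones from mmap(). */
    static void *kvmalloc_toy(size_t size)
    {
        size_t total = size + sizeof(struct hdr);
        struct hdr *h;

        if (total >= BIG) {
            h = mmap(NULL, total, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (h == MAP_FAILED)
                return NULL;
        } else {
            h = malloc(total);
            if (!h)
                return NULL;
        }
        h->total = total;
        return h + 1;
    }

    /* Toy kvfree(): dispatch on the backend recorded at allocation time. */
    static void kvfree_toy(void *p)
    {
        struct hdr *h;

        if (!p)
            return;
        h = (struct hdr *)p - 1;
        if (h->total >= BIG)
            munmap(h, h->total);  /* a plain free() here would corrupt state */
        else
            free(h);
    }

    int main(void)
    {
        void *small = kvmalloc_toy(128);
        void *big = kvmalloc_toy(1 << 20);

        kvfree_toy(small);
        kvfree_toy(big);
        puts("freed with the matching backend");
        return 0;
    }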
+ "iWARP" : "RoCE"); } static DEVICE_ATTR_RO(hca_type); diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index a7cde98e73e8..9ce8a1b925d2 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -220,13 +220,12 @@ static void siw_put_work(struct siw_cm_work *work) static void siw_cep_set_inuse(struct siw_cep *cep) { unsigned long flags; - int rv; retry: spin_lock_irqsave(&cep->lock, flags); if (cep->in_use) { spin_unlock_irqrestore(&cep->lock, flags); - rv = wait_event_interruptible(cep->waitq, !cep->in_use); + wait_event_interruptible(cep->waitq, !cep->in_use); if (signal_pending(current)) flush_signals(current); goto retry; diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index f55c4e80aea4..d0f140daf659 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -612,6 +612,7 @@ static __init int siw_init_module(void) if (!siw_create_tx_threads()) { pr_info("siw: Could not start any TX thread\n"); + rv = -ENOMEM; goto out_error; } /* diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c index 11383d9f95ef..e27bd5b35b96 100644 --- a/drivers/infiniband/sw/siw/siw_qp.c +++ b/drivers/infiniband/sw/siw/siw_qp.c @@ -220,12 +220,14 @@ static int siw_qp_enable_crc(struct siw_qp *qp) { struct siw_rx_stream *c_rx = &qp->rx_stream; struct siw_iwarp_tx *c_tx = &qp->tx_ctx; - int size = crypto_shash_descsize(siw_crypto_shash) + - sizeof(struct shash_desc); + int size; if (siw_crypto_shash == NULL) return -ENOENT; + size = crypto_shash_descsize(siw_crypto_shash) + + sizeof(struct shash_desc); + c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL); c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL); if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) { diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index eb104c719629..4413aa67000e 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -23,6 +23,8 @@ #include <linux/mem_encrypt.h> #include <asm/pci-direct.h> #include <asm/iommu.h> +#include <asm/apic.h> +#include <asm/msidef.h> #include <asm/gart.h> #include <asm/x86_init.h> #include <asm/iommu_table.h> @@ -1920,6 +1922,90 @@ static int iommu_setup_msi(struct amd_iommu *iommu) return 0; } +#define XT_INT_DEST_MODE(x) (((x) & 0x1ULL) << 2) +#define XT_INT_DEST_LO(x) (((x) & 0xFFFFFFULL) << 8) +#define XT_INT_VEC(x) (((x) & 0xFFULL) << 32) +#define XT_INT_DEST_HI(x) ((((x) >> 24) & 0xFFULL) << 56) + +/** + * Setup the IntCapXT registers with interrupt routing information + * based on the PCI MSI capability block registers, accessed via + * MMIO MSI address low/hi and MSI data registers. + */ +static void iommu_update_intcapxt(struct amd_iommu *iommu) +{ + u64 val; + u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET); + u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET); + u32 data = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET); + bool dm = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1; + u32 dest = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF); + + if (x2apic_enabled()) + dest |= MSI_ADDR_EXT_DEST_ID(addr_hi); + + val = XT_INT_VEC(data & 0xFF) | + XT_INT_DEST_MODE(dm) | + XT_INT_DEST_LO(dest) | + XT_INT_DEST_HI(dest); + + /** + * Current IOMMU implementation uses the same IRQ for all + * 3 IOMMU interrupts.
*/ + writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET); + writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET); + writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET); +} + +static void _irq_notifier_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct amd_iommu *iommu; + + for_each_iommu(iommu) { + if (iommu->dev->irq == notify->irq) { + iommu_update_intcapxt(iommu); + break; + } + } +} + +static void _irq_notifier_release(struct kref *ref) +{ +} + +static int iommu_init_intcapxt(struct amd_iommu *iommu) +{ + int ret; + struct irq_affinity_notify *notify = &iommu->intcapxt_notify; + + /** + * IntCapXT requires XTSup=1, which can be inferred + * from amd_iommu_xt_mode. + */ + if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE) + return 0; + + /** + * Also, we need to set up a notifier to update the IntCapXT registers + * whenever the irq affinity is changed from user-space. + */ + notify->irq = iommu->dev->irq; + notify->notify = _irq_notifier_notify; + notify->release = _irq_notifier_release; + ret = irq_set_affinity_notifier(iommu->dev->irq, notify); + if (ret) { + pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n", + iommu->devid, iommu->dev->irq); + return ret; + } + + iommu_update_intcapxt(iommu); + iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN); + return ret; +} + static int iommu_init_msi(struct amd_iommu *iommu) { int ret; @@ -1936,6 +2022,10 @@ static int iommu_init_msi(struct amd_iommu *iommu) return ret; enable_faults: + ret = iommu_init_intcapxt(iommu); + if (ret) + return ret; + iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); if (iommu->ppr_log != NULL) diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 52c35d557fad..64edd5a9694c 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -60,6 +60,12 @@ #define MMIO_PPR_LOG_OFFSET 0x0038 #define MMIO_GA_LOG_BASE_OFFSET 0x00e0 #define MMIO_GA_LOG_TAIL_OFFSET 0x00e8 +#define MMIO_MSI_ADDR_LO_OFFSET 0x015C +#define MMIO_MSI_ADDR_HI_OFFSET 0x0160 +#define MMIO_MSI_DATA_OFFSET 0x0164 +#define MMIO_INTCAPXT_EVT_OFFSET 0x0170 +#define MMIO_INTCAPXT_PPR_OFFSET 0x0178 +#define MMIO_INTCAPXT_GALOG_OFFSET 0x0180 #define MMIO_CMD_HEAD_OFFSET 0x2000 #define MMIO_CMD_TAIL_OFFSET 0x2008 #define MMIO_EVT_HEAD_OFFSET 0x2010 @@ -150,6 +156,7 @@ #define CONTROL_GALOG_EN 0x1CULL #define CONTROL_GAINT_EN 0x1DULL #define CONTROL_XT_EN 0x32ULL +#define CONTROL_INTCAPXT_EN 0x33ULL #define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT) #define CTRL_INV_TO_NONE 0 @@ -592,6 +599,8 @@ struct amd_iommu { /* DebugFS Info */ struct dentry *debugfs; #endif + /* IRQ notifier for IntCapXT interrupt */ + struct irq_affinity_notify intcapxt_notify; }; static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c index 73a552914455..2b25d9c59336 100644 --- a/drivers/iommu/intel-iommu-debugfs.c +++ b/drivers/iommu/intel-iommu-debugfs.c @@ -162,9 +162,9 @@ static inline void print_tbl_walk(struct seq_file *m) (u64)0, (u64)0, (u64)0); else seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", - tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[0], + tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[2], tbl_wlk->pasid_tbl_entry->val[1], - tbl_wlk->pasid_tbl_entry->val[2]); + tbl_wlk->pasid_tbl_entry->val[0]); } static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry, diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index
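The XT_INT_* macros above scatter an APIC destination ID and vector across the 64-bit IntCapXT register layout: dest[23:0] lands in bits 31:8, dest[31:24] in bits 63:56, and the vector in bits 39:32. A runnable demonstration of the packing, using arbitrary example values:

    #include <stdio.h>
    #include <stdint.h>

    /* Same packing as the XT_INT_* macros added in the patch above. */
    #define XT_INT_DEST_MODE(x) (((x) & 0x1ULL) << 2)
    #define XT_INT_DEST_LO(x)   (((x) & 0xFFFFFFULL) << 8)
    #define XT_INT_VEC(x)       (((x) & 0xFFULL) << 32)
    #define XT_INT_DEST_HI(x)   ((((x) >> 24) & 0xFFULL) << 56)

    int main(void)
    {
        uint32_t dest = 0x01000042;  /* example 32-bit destination APIC ID */
        uint8_t vector = 0x2f;       /* example vector */
        int dm = 0;                  /* physical destination mode */

        uint64_t val = XT_INT_VEC(vector) |
                       XT_INT_DEST_MODE(dm) |
                       XT_INT_DEST_LO(dest) |
                       XT_INT_DEST_HI(dest);

        /* prints 0x0100002f00004200 for the values above */
        printf("IntCapXT value: 0x%016llx\n", (unsigned long long)val);
        return 0;
    }

Splitting the destination across low and high fields is what lets the register carry a full 32-bit x2APIC destination ID.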
ac4172c02244..bdaed2da8a55 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -339,8 +339,6 @@ static void domain_exit(struct dmar_domain *domain); static void domain_remove_dev_info(struct dmar_domain *domain); static void dmar_remove_one_dev_info(struct device *dev); static void __dmar_remove_one_dev_info(struct device_domain_info *info); -static void domain_context_clear(struct intel_iommu *iommu, - struct device *dev); static int domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu); static bool device_is_rmrr_locked(struct device *dev); @@ -1833,9 +1831,65 @@ static inline int guestwidth_to_adjustwidth(int gaw) return agaw; } +static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu, + int guest_width) +{ + int adjust_width, agaw; + unsigned long sagaw; + int err; + + init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN); + + err = init_iova_flush_queue(&domain->iovad, + iommu_flush_iova, iova_entry_free); + if (err) + return err; + + domain_reserve_special_ranges(domain); + + /* calculate AGAW */ + if (guest_width > cap_mgaw(iommu->cap)) + guest_width = cap_mgaw(iommu->cap); + domain->gaw = guest_width; + adjust_width = guestwidth_to_adjustwidth(guest_width); + agaw = width_to_agaw(adjust_width); + sagaw = cap_sagaw(iommu->cap); + if (!test_bit(agaw, &sagaw)) { + /* hardware doesn't support it, choose a bigger one */ + pr_debug("Hardware doesn't support agaw %d\n", agaw); + agaw = find_next_bit(&sagaw, 5, agaw); + if (agaw >= 5) + return -ENODEV; + } + domain->agaw = agaw; + + if (ecap_coherent(iommu->ecap)) + domain->iommu_coherency = 1; + else + domain->iommu_coherency = 0; + + if (ecap_sc_support(iommu->ecap)) + domain->iommu_snooping = 1; + else + domain->iommu_snooping = 0; + + if (intel_iommu_superpage) + domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); + else + domain->iommu_superpage = 0; + + domain->nid = iommu->node; + + /* always allocate the top pgd */ + domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); + if (!domain->pgd) + return -ENOMEM; + __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); + return 0; +} + static void domain_exit(struct dmar_domain *domain) { - struct page *freelist; /* Remove associated devices and clear attached or cached domains */ domain_remove_dev_info(domain); @@ -1843,9 +1897,12 @@ static void domain_exit(struct dmar_domain *domain) /* destroy iovas */ put_iova_domain(&domain->iovad); - freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); + if (domain->pgd) { + struct page *freelist; - dma_free_pagelist(freelist); + freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); + dma_free_pagelist(freelist); + } free_domain_mem(domain); } @@ -2048,26 +2105,9 @@ out_unlock: return ret; } -struct domain_context_mapping_data { - struct dmar_domain *domain; - struct intel_iommu *iommu; - struct pasid_table *table; -}; - -static int domain_context_mapping_cb(struct pci_dev *pdev, - u16 alias, void *opaque) -{ - struct domain_context_mapping_data *data = opaque; - - return domain_context_mapping_one(data->domain, data->iommu, - data->table, PCI_BUS_NUM(alias), - alias & 0xff); -} - static int domain_context_mapping(struct dmar_domain *domain, struct device *dev) { - struct domain_context_mapping_data data; struct pasid_table *table; struct intel_iommu *iommu; u8 bus, devfn; @@ -2077,17 +2117,7 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev) return -ENODEV; table = intel_pasid_get_table(dev); - - if 
(!dev_is_pci(dev)) - return domain_context_mapping_one(domain, iommu, table, - bus, devfn); - - data.domain = domain; - data.iommu = iommu; - data.table = table; - - return pci_for_each_dma_alias(to_pci_dev(dev), - &domain_context_mapping_cb, &data); + return domain_context_mapping_one(domain, iommu, table, bus, devfn); } static int domain_context_mapped_cb(struct pci_dev *pdev, @@ -2513,31 +2543,6 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque) return 0; } -static int domain_init(struct dmar_domain *domain, int guest_width) -{ - int adjust_width; - - init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN); - domain_reserve_special_ranges(domain); - - /* calculate AGAW */ - domain->gaw = guest_width; - adjust_width = guestwidth_to_adjustwidth(guest_width); - domain->agaw = width_to_agaw(adjust_width); - - domain->iommu_coherency = 0; - domain->iommu_snooping = 0; - domain->iommu_superpage = 0; - domain->max_addr = 0; - - /* always allocate the top pgd */ - domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); - if (!domain->pgd) - return -ENOMEM; - domain_flush_cache(domain, domain->pgd, PAGE_SIZE); - return 0; -} - static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw) { struct device_domain_info *info; @@ -2575,19 +2580,11 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw) domain = alloc_domain(0); if (!domain) return NULL; - - if (domain_init(domain, gaw)) { + if (domain_init(domain, iommu, gaw)) { domain_exit(domain); return NULL; } - if (init_iova_flush_queue(&domain->iovad, - iommu_flush_iova, - iova_entry_free)) { - pr_warn("iova flush queue initialization failed\n"); - intel_iommu_strict = 1; - } - out: return domain; } @@ -2692,6 +2689,8 @@ static int domain_prepare_identity_map(struct device *dev, return iommu_domain_identity_map(domain, start, end); } +static int md_domain_init(struct dmar_domain *domain, int guest_width); + static int __init si_domain_init(int hw) { struct dmar_rmrr_unit *rmrr; @@ -2702,7 +2701,7 @@ static int __init si_domain_init(int hw) if (!si_domain) return -EFAULT; - if (domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { + if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { domain_exit(si_domain); return -EFAULT; } @@ -3564,7 +3563,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) freelist = domain_unmap(domain, start_pfn, last_pfn); - if (intel_iommu_strict || (pdev && pdev->untrusted)) { + if (intel_iommu_strict || (pdev && pdev->untrusted) || + !has_iova_flush_queue(&domain->iovad)) { iommu_flush_iotlb_psi(iommu, domain, start_pfn, nrpages, !freelist, 0); /* free iova */ @@ -4758,28 +4758,6 @@ out_free_dmar: return ret; } -static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque) -{ - struct intel_iommu *iommu = opaque; - - domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff); - return 0; -} - -/* - * NB - intel-iommu lacks any sort of reference counting for the users of - * dependent devices. If multiple endpoints have intersecting dependent - * devices, unbinding the driver from any one of them will possibly leave - * the others unable to operate. 
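The new domain_init() above validates the computed AGAW against the hardware's sagaw capability bitmap and, if the exact width is unsupported, walks up with find_next_bit() to the next larger supported width, failing only past bit 4. The same selection logic restated as a standalone loop, with illustrative capability values:

    #include <stdio.h>

    /* Pick the smallest supported address width >= the requested one,
     * mirroring the sagaw/find_next_bit logic in domain_init() above. */
    static int pick_agaw(unsigned long sagaw, int wanted)
    {
        for (int agaw = wanted; agaw < 5; agaw++)
            if (sagaw & (1UL << agaw))
                return agaw;    /* hardware supports this one */
        return -1;              /* -ENODEV in the driver */
    }

    int main(void)
    {
        unsigned long sagaw = (1UL << 2) | (1UL << 3);  /* 3- and 4-level */

        printf("want 1 -> %d\n", pick_agaw(sagaw, 1));  /* upgraded to 2 */
        printf("want 3 -> %d\n", pick_agaw(sagaw, 3));  /* exact match  */
        printf("want 4 -> %d\n", pick_agaw(sagaw, 4));  /* unsupported: -1 */
        return 0;
    }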
- */ -static void domain_context_clear(struct intel_iommu *iommu, struct device *dev) -{ - if (!iommu || !dev || !dev_is_pci(dev)) - return; - - pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu); -} - static void __dmar_remove_one_dev_info(struct device_domain_info *info) { struct dmar_domain *domain; @@ -4800,7 +4778,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info) PASID_RID2PASID); iommu_disable_dev_iotlb(info); - domain_context_clear(iommu, info->dev); + domain_context_clear_one(iommu, info->bus, info->devfn); intel_pasid_free_table(info->dev); } @@ -4829,6 +4807,31 @@ static void dmar_remove_one_dev_info(struct device *dev) spin_unlock_irqrestore(&device_domain_lock, flags); } +static int md_domain_init(struct dmar_domain *domain, int guest_width) +{ + int adjust_width; + + init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN); + domain_reserve_special_ranges(domain); + + /* calculate AGAW */ + domain->gaw = guest_width; + adjust_width = guestwidth_to_adjustwidth(guest_width); + domain->agaw = width_to_agaw(adjust_width); + + domain->iommu_coherency = 0; + domain->iommu_snooping = 0; + domain->iommu_superpage = 0; + domain->max_addr = 0; + + /* always allocate the top pgd */ + domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); + if (!domain->pgd) + return -ENOMEM; + domain_flush_cache(domain, domain->pgd, PAGE_SIZE); + return 0; +} + static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) { struct dmar_domain *dmar_domain; @@ -4843,7 +4846,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) pr_err("Can't allocate dmar_domain\n"); return NULL; } - if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { + if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { pr_err("Domain initialization failed\n"); domain_exit(dmar_domain); return NULL; diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index d499b2621239..3e1a8a675572 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -54,9 +54,14 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, } EXPORT_SYMBOL_GPL(init_iova_domain); +bool has_iova_flush_queue(struct iova_domain *iovad) +{ + return !!iovad->fq; +} + static void free_iova_flush_queue(struct iova_domain *iovad) { - if (!iovad->fq) + if (!has_iova_flush_queue(iovad)) return; if (timer_pending(&iovad->fq_timer)) @@ -74,13 +79,14 @@ static void free_iova_flush_queue(struct iova_domain *iovad) int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor) { + struct iova_fq __percpu *queue; int cpu; atomic64_set(&iovad->fq_flush_start_cnt, 0); atomic64_set(&iovad->fq_flush_finish_cnt, 0); - iovad->fq = alloc_percpu(struct iova_fq); - if (!iovad->fq) + queue = alloc_percpu(struct iova_fq); + if (!queue) return -ENOMEM; iovad->flush_cb = flush_cb; @@ -89,13 +95,17 @@ int init_iova_flush_queue(struct iova_domain *iovad, for_each_possible_cpu(cpu) { struct iova_fq *fq; - fq = per_cpu_ptr(iovad->fq, cpu); + fq = per_cpu_ptr(queue, cpu); fq->head = 0; fq->tail = 0; spin_lock_init(&fq->lock); } + smp_wmb(); + + iovad->fq = queue; + timer_setup(&iovad->fq_timer, fq_flush_timeout, 0); atomic_set(&iovad->fq_timer_on, 0); @@ -127,8 +137,9 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) struct iova *cached_iova; cached_iova = rb_entry(iovad->cached32_node, struct iova, node); - if (free->pfn_hi < iovad->dma_32bit_pfn && - free->pfn_lo >= cached_iova->pfn_lo) { + if 
(free == cached_iova || + (free->pfn_hi < iovad->dma_32bit_pfn && + free->pfn_lo >= cached_iova->pfn_lo)) { iovad->cached32_node = rb_next(&free->node); iovad->max32_alloc_size = iovad->dma_32bit_pfn; } diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 433f4d2ee956..80a740df0737 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -2,7 +2,7 @@ /* * Virtio driver for the paravirtualized IOMMU * - * Copyright (C) 2018 Arm Limited + * Copyright (C) 2019 Arm Limited */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -47,7 +47,10 @@ struct viommu_dev { /* Device configuration */ struct iommu_domain_geometry geometry; u64 pgsize_bitmap; - u8 domain_bits; + u32 first_domain; + u32 last_domain; + /* Supported MAP flags */ + u32 map_flags; u32 probe_size; }; @@ -62,6 +65,7 @@ struct viommu_domain { struct viommu_dev *viommu; struct mutex mutex; /* protects viommu pointer */ unsigned int id; + u32 map_flags; spinlock_t mappings_lock; struct rb_root_cached mappings; @@ -113,6 +117,8 @@ static int viommu_get_req_errno(void *buf, size_t len) return -ENOENT; case VIRTIO_IOMMU_S_FAULT: return -EFAULT; + case VIRTIO_IOMMU_S_NOMEM: + return -ENOMEM; case VIRTIO_IOMMU_S_IOERR: case VIRTIO_IOMMU_S_DEVERR: default: @@ -607,15 +613,15 @@ static int viommu_domain_finalise(struct viommu_dev *viommu, { int ret; struct viommu_domain *vdomain = to_viommu_domain(domain); - unsigned int max_domain = viommu->domain_bits > 31 ? ~0 : - (1U << viommu->domain_bits) - 1; vdomain->viommu = viommu; + vdomain->map_flags = viommu->map_flags; domain->pgsize_bitmap = viommu->pgsize_bitmap; domain->geometry = viommu->geometry; - ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL); + ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, + viommu->last_domain, GFP_KERNEL); if (ret >= 0) vdomain->id = (unsigned int)ret; @@ -710,7 +716,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { int ret; - int flags; + u32 flags; struct virtio_iommu_req_map map; struct viommu_domain *vdomain = to_viommu_domain(domain); @@ -718,6 +724,9 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, (prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) | (prot & IOMMU_MMIO ? 
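The init_iova_flush_queue() rework above builds the percpu queue completely, issues smp_wmb(), and only then publishes it through iovad->fq, so has_iova_flush_queue() readers never observe a half-initialized queue. A C11 sketch of the same publish pattern, with a release store and an acquire load standing in for the kernel barriers:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct flush_queue { int head, tail; };

    static _Atomic(struct flush_queue *) fq;

    static int init_flush_queue(void)
    {
        struct flush_queue *q = calloc(1, sizeof(*q));

        if (!q)
            return -1;
        q->head = q->tail = 0;  /* finish all initialization first... */
        /* ...then publish; plays the role of smp_wmb() + "iovad->fq = queue" */
        atomic_store_explicit(&fq, q, memory_order_release);
        return 0;
    }

    static int has_flush_queue(void)
    {
        /* analog of has_iova_flush_queue(): NULL means "no queue yet" */
        return atomic_load_explicit(&fq, memory_order_acquire) != NULL;
    }

    int main(void)
    {
        printf("before init: %d\n", has_flush_queue());
        if (init_flush_queue() == 0)
            printf("after init:  %d\n", has_flush_queue());
        return 0;
    }

Publishing last is what makes the NULL check in intel_unmap() above a safe "queue not ready, flush synchronously" fallback.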
VIRTIO_IOMMU_MAP_F_MMIO : 0); + if (flags & ~vdomain->map_flags) + return -EINVAL; + ret = viommu_add_mapping(vdomain, iova, paddr, size, flags); if (ret) return ret; @@ -1027,7 +1036,8 @@ static int viommu_probe(struct virtio_device *vdev) goto err_free_vqs; } - viommu->domain_bits = 32; + viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE; + viommu->last_domain = ~0U; /* Optional features */ virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE, @@ -1038,9 +1048,13 @@ static int viommu_probe(struct virtio_device *vdev) struct virtio_iommu_config, input_range.end, &input_end); - virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS, - struct virtio_iommu_config, domain_bits, - &viommu->domain_bits); + virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, + struct virtio_iommu_config, domain_range.start, + &viommu->first_domain); + + virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, + struct virtio_iommu_config, domain_range.end, + &viommu->last_domain); virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE, struct virtio_iommu_config, probe_size, @@ -1052,6 +1066,9 @@ static int viommu_probe(struct virtio_device *vdev) .force_aperture = true, }; + if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO)) + viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO; + viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap; virtio_device_ready(vdev); @@ -1130,9 +1147,10 @@ static void viommu_config_changed(struct virtio_device *vdev) static unsigned int features[] = { VIRTIO_IOMMU_F_MAP_UNMAP, - VIRTIO_IOMMU_F_DOMAIN_BITS, VIRTIO_IOMMU_F_INPUT_RANGE, + VIRTIO_IOMMU_F_DOMAIN_RANGE, VIRTIO_IOMMU_F_PROBE, + VIRTIO_IOMMU_F_MMIO, }; static struct virtio_device_id id_table[] = { diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 730fbe0e2a9d..1b5c3672aea2 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -3010,7 +3010,7 @@ static int its_vpe_init(struct its_vpe *vpe) if (!its_alloc_vpe_table(vpe_id)) { its_vpe_id_free(vpe_id); - its_free_pending_table(vpe->vpt_page); + its_free_pending_table(vpt_page); return -ENOMEM; } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 9bca4896fa6f..96d927f0f91a 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -771,8 +771,10 @@ static void gic_cpu_sys_reg_init(void) case 7: write_gicreg(0, ICC_AP0R3_EL1); write_gicreg(0, ICC_AP0R2_EL1); + /* Fall through */ case 6: write_gicreg(0, ICC_AP0R1_EL1); + /* Fall through */ case 5: case 4: write_gicreg(0, ICC_AP0R0_EL1); @@ -786,8 +788,10 @@ static void gic_cpu_sys_reg_init(void) case 7: write_gicreg(0, ICC_AP1R3_EL1); write_gicreg(0, ICC_AP1R2_EL1); + /* Fall through */ case 6: write_gicreg(0, ICC_AP1R1_EL1); + /* Fall through */ case 5: case 4: write_gicreg(0, ICC_AP1R0_EL1); diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index bf2237ac5d09..4f74c15c4755 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ b/drivers/irqchip/irq-imx-gpcv2.c @@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = { .irq_unmask = imx_gpcv2_irq_unmask, .irq_set_wake = imx_gpcv2_irq_set_wake, .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_type = irq_chip_set_type_parent, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, #endif diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 3dd28382d5f5..3f09f658e8e2 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -241,12 +241,15 @@ static int 
mbigen_of_create_domain(struct platform_device *pdev, parent = platform_bus_type.dev_root; child = of_platform_device_create(np, NULL, parent); - if (!child) + if (!child) { + of_node_put(np); return -ENOMEM; + } if (of_property_read_u32(child->dev.of_node, "num-pins", &num_pins) < 0) { dev_err(&pdev->dev, "No num-pins property\n"); + of_node_put(np); return -EINVAL; } @@ -254,8 +257,10 @@ static int mbigen_of_create_domain(struct platform_device *pdev, mbigen_write_msg, &mbigen_domain_ops, mgn_chip); - if (!domain) + if (!domain) { + of_node_put(np); return -ENOMEM; + } } return 0; diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 276065c888bc..23f1f41c8602 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -852,6 +852,7 @@ int smu_queue_i2c(struct smu_i2c_cmd *cmd) break; case SMU_I2C_TRANSFER_COMBINED: cmd->info.devaddr &= 0xfe; + /* fall through */ case SMU_I2C_TRANSFER_STDSUB: if (cmd->info.sublen > 3) return -EINVAL; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 26e374fbf57c..20ed838e9413 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -931,6 +931,9 @@ int bch_cached_dev_run(struct cached_dev *dc) if (dc->io_disable) { pr_err("I/O disabled on cached dev %s", dc->backing_dev_name); + kfree(env[1]); + kfree(env[2]); + kfree(buf); return -EIO; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index caaee8032afe..7b6c3ee9e755 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -882,23 +882,23 @@ EXPORT_SYMBOL_GPL(dm_table_set_type); /* validate the dax capability of the target device span */ int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) + sector_t start, sector_t len, void *data) { int blocksize = *(int *) data; return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize, - start, len); + start, len); } /* Check devices support synchronous DAX */ -static int device_synchronous(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { - return dax_synchronous(dev->dax_dev); + return dev->dax_dev && dax_synchronous(dev->dax_dev); } bool dm_table_supports_dax(struct dm_table *t, - iterate_devices_callout_fn iterate_fn, int *blocksize) + iterate_devices_callout_fn iterate_fn, int *blocksize) { struct dm_target *ti; unsigned i; @@ -911,7 +911,7 @@ bool dm_table_supports_dax(struct dm_table *t, return false; if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, iterate_fn, blocksize)) + !ti->type->iterate_devices(ti, iterate_fn, blocksize)) return false; } @@ -1921,7 +1921,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (dm_table_supports_dax(t, device_supports_dax, &page_size)) { blk_queue_flag_set(QUEUE_FLAG_DAX, q); - if (dm_table_supports_dax(t, device_synchronous, NULL)) + if (dm_table_supports_dax(t, device_dax_synchronous, NULL)) set_dax_synchronous(t->md->dax_dev); } else diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c index 21fb90d66bfc..25c73c13cc7e 100644 --- a/drivers/media/v4l2-core/v4l2-subdev.c +++ b/drivers/media/v4l2-core/v4l2-subdev.c @@ -124,7 +124,7 @@ static inline int check_which(__u32 which) static inline int check_pad(struct v4l2_subdev *sd, __u32 pad) { #if defined(CONFIG_MEDIA_CONTROLLER) - if (sd->entity.graph_obj.mdev) { + if 
(sd->entity.num_pads) { if (pad >= sd->entity.num_pads) return -EINVAL; return 0; diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig index f88094719552..f2abe27010ef 100644 --- a/drivers/misc/eeprom/Kconfig +++ b/drivers/misc/eeprom/Kconfig @@ -5,6 +5,7 @@ config EEPROM_AT24 tristate "I2C EEPROMs / RAMs / ROMs from most vendors" depends on I2C && SYSFS select NVMEM + select NVMEM_SYSFS select REGMAP_I2C help Enable this driver to get read/write support to most I2C EEPROMs @@ -34,6 +35,7 @@ config EEPROM_AT25 tristate "SPI EEPROMs from most vendors" depends on SPI && SYSFS select NVMEM + select NVMEM_SYSFS help Enable this driver to get read/write support to most SPI EEPROMs, after you configure the board init code to know about each eeprom @@ -80,6 +82,7 @@ config EEPROM_93XX46 depends on SPI && SYSFS select REGMAP select NVMEM + select NVMEM_SYSFS help Driver for the microwire EEPROM chipsets 93xx46x. The driver supports both read and write commands and also the command to diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 35bf2477693d..518945b2f737 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -685,7 +685,7 @@ static int at24_probe(struct i2c_client *client) nvmem_config.name = dev_name(dev); nvmem_config.dev = dev; nvmem_config.read_only = !writable; - nvmem_config.root_only = true; + nvmem_config.root_only = !(flags & AT24_FLAG_IRUGO); nvmem_config.owner = THIS_MODULE; nvmem_config.compat = true; nvmem_config.base_dev = dev; diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 75294ec65257..1a2c062a57d4 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -695,8 +695,8 @@ static int goya_sw_init(struct hl_device *hdev) goto free_dma_pool; } - dev_dbg(hdev->dev, "cpu accessible memory at bus address 0x%llx\n", - hdev->cpu_accessible_dma_address); + dev_dbg(hdev->dev, "cpu accessible memory at bus address %pad\n", + &hdev->cpu_accessible_dma_address); hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1); if (!hdev->cpu_accessible_dma_pool) { @@ -4449,7 +4449,6 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry) case GOYA_ASYNC_EVENT_ID_AXI_ECC: case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC: case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET: - case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT: goya_print_irq_info(hdev, event_type, false); hl_device_reset(hdev, true, false); break; @@ -4485,6 +4484,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry) goya_unmask_irq(hdev, event_type); break; + case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT: case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU: case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU: case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU: diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index d74b182e19f3..6c0173772162 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -81,6 +81,9 @@ #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ +#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */ +#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 7a2b3545a7f9..57cb68f5cc64 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -98,6 +98,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, + 
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index e327f80ebe70..7102e2ebc614 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -10,6 +10,7 @@ #include <linux/kthread.h> #include <linux/scatterlist.h> #include <linux/dma-mapping.h> +#include <linux/backing-dev.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> @@ -427,6 +428,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card) goto free_tag_set; } + if (mmc_host_is_spi(host) && host->use_spi_crc) + mq->queue->backing_dev_info->capabilities |= + BDI_CAP_STABLE_WRITES; + mq->queue->queuedata = mq; blk_queue_rq_timeout(mq->queue, 60 * HZ); diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index faaaf52a46d2..eea52e2c5a0c 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -2012,8 +2012,7 @@ static void dw_mci_tasklet_func(unsigned long priv) * delayed. Allowing the transfer to take place * avoids races and keeps things simple. */ - if ((err != -ETIMEDOUT) && - (cmd->opcode == MMC_SEND_TUNING_BLOCK)) { + if (err != -ETIMEDOUT) { state = STATE_SENDING_DATA; continue; } diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index 2d736e416775..ba9a63db73da 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -73,7 +73,7 @@ #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6) #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8) #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9) - #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(10, 13) + #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10) #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15) #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30) #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31) diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index 6ee340a3fb3a..603a5d9f045a 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c @@ -624,6 +624,7 @@ err_cleanup_host: sdhci_cleanup_host(host); pm_runtime_disable: + pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig index cff6bbd226f5..b4e3caf7d799 100644 --- a/drivers/mtd/hyperbus/Kconfig +++ b/drivers/mtd/hyperbus/Kconfig @@ -14,8 +14,9 @@ if MTD_HYPERBUS config HBMC_AM654 tristate "HyperBus controller driver for AM65x SoC" + depends on ARM64 || COMPILE_TEST select MULTIPLEXER - select MUX_MMIO + imply MUX_MMIO help This is the driver for HyperBus controller on TI's AM65x and other SoCs diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c index a1f8fe1abb10..e082d632fb74 100644 --- a/drivers/mtd/nand/onenand/onenand_base.c +++ b/drivers/mtd/nand/onenand/onenand_base.c @@ -3259,6 +3259,7 @@ static void onenand_check_features(struct mtd_info *mtd) switch (density) { case ONENAND_DEVICE_DENSITY_8Gb: this->options |= ONENAND_HAS_NOP_1; + /* fall through */ case ONENAND_DEVICE_DENSITY_4Gb: if (ONENAND_IS_DDP(this)) this->options |= ONENAND_HAS_2PLANE; diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index 1622d3145587..8ca9fad6e6ad 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -390,6 +390,14 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip) 
(chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2) return MICRON_ON_DIE_UNSUPPORTED; + /* + * It seems that there are devices which do not support ECC officially. + * At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices support + * enabling the ECC feature but don't reflect that to the READ_ID table. + * So we have to guarantee that we disable the ECC feature directly + * after we did the READ_ID table command. Later we can evaluate the + * ECC_ENABLE support. + */ ret = micron_nand_on_die_ecc_setup(chip, true); if (ret) return MICRON_ON_DIE_UNSUPPORTED; @@ -398,13 +406,13 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip) if (ret) return MICRON_ON_DIE_UNSUPPORTED; - if (!(id[4] & MICRON_ID_ECC_ENABLED)) - return MICRON_ON_DIE_UNSUPPORTED; - ret = micron_nand_on_die_ecc_setup(chip, false); if (ret) return MICRON_ON_DIE_UNSUPPORTED; + if (!(id[4] & MICRON_ID_ECC_ENABLED)) + return MICRON_ON_DIE_UNSUPPORTED; + ret = nand_readid_op(chip, 0, id, sizeof(id)); if (ret) return MICRON_ON_DIE_UNSUPPORTED; diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 1d4075903971..c8e1a04ba384 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -898,7 +898,8 @@ static void at91_irq_err_state(struct net_device *dev, CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; } - case CAN_STATE_ERROR_WARNING: /* fallthrough */ + /* fall through */ + case CAN_STATE_ERROR_WARNING: /* * from: ERROR_ACTIVE, ERROR_WARNING * to : ERROR_PASSIVE, BUS_OFF @@ -947,7 +948,8 @@ static void at91_irq_err_state(struct net_device *dev, netdev_dbg(dev, "Error Active\n"); cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_ACTIVE; - case CAN_STATE_ERROR_WARNING: /* fallthrough */ + /* fall through */ + case CAN_STATE_ERROR_WARNING: reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF; reg_ier = AT91_IRQ_ERRP; break; diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index 7f6a3b971da9..13b10cbf236a 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -660,7 +660,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd) pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ, PCIEFD_REG_CAN_CLK_SEL); - /* fallthough */ + /* fall through */ case CANFD_CLK_SEL_80MHZ: priv->ucan.can.clock.freq = 80 * 1000 * 1000; break; diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 44e99e3d7134..234cf1042df6 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -860,7 +860,8 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) if (new_state >= CAN_STATE_ERROR_WARNING && new_state <= CAN_STATE_BUS_OFF) priv->can.can_stats.error_warning++; - case CAN_STATE_ERROR_WARNING: /* fallthrough */ + /* fall through */ + case CAN_STATE_ERROR_WARNING: if (new_state >= CAN_STATE_ERROR_PASSIVE && new_state <= CAN_STATE_BUS_OFF) priv->can.can_stats.error_passive++; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 15ce5ad1d632..617da295b6c1 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -415,7 +415,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, new_state = CAN_STATE_ERROR_WARNING; break; } - /* else: fall through */ + /* fall through */ case CAN_STATE_ERROR_WARNING: if (n & PCAN_USB_ERROR_BUS_HEAVY) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
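Several hunks above (gic-v3, smu, onenand, and the CAN drivers) normalize fall-through annotations to a "fall through" comment placed directly before the next case label, which is where GCC's -Wimplicit-fallthrough expects to find it. A compilable miniature of the pattern, loosely shaped like the GIC register-clearing switch:

    #include <stdio.h>

    /* Deliberate fallthrough, annotated so -Wimplicit-fallthrough stays quiet. */
    static int clear_regs(int nregs)
    {
        int cleared = 0;

        switch (nregs) {
        case 4:
            cleared++;      /* reg 3 */
            /* fall through */
        case 3:
            cleared++;      /* reg 2 */
            /* fall through */
        case 2:
            cleared++;      /* reg 1 */
            /* fall through */
        case 1:
            cleared++;      /* reg 0 */
            break;
        default:
            break;
        }
        return cleared;
    }

    int main(void)
    {
        printf("nregs=3 clears %d registers\n", clear_regs(3));
        return 0;
    }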
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 656ed80647f0..e2be5a685130 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -285,6 +285,9 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) hw_cons = le16_to_cpu(*txdata->tx_cons_sb); sw_cons = txdata->tx_pkt_cons; + /* Ensure subsequent loads occur after hw_cons */ + smp_rmb(); + while (sw_cons != hw_cons) { u16 pkt_cons; diff --git a/drivers/net/ethernet/chelsio/cxgb/my3126.c b/drivers/net/ethernet/chelsio/cxgb/my3126.c index 20c09cc4b323..60aa45b375b6 100644 --- a/drivers/net/ethernet/chelsio/cxgb/my3126.c +++ b/drivers/net/ethernet/chelsio/cxgb/my3126.c @@ -94,7 +94,7 @@ static int my3126_interrupt_handler(struct cphy *cphy) return cphy_cause_link_change; } -static void my3216_poll(struct work_struct *work) +static void my3126_poll(struct work_struct *work) { struct cphy *cphy = container_of(work, struct cphy, phy_update.work); @@ -177,7 +177,7 @@ static struct cphy *my3126_phy_create(struct net_device *dev, return NULL; cphy_init(cphy, dev, phy_addr, &my3126_ops, mdio_ops); - INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); + INIT_DELAYED_WORK(&cphy->phy_update, my3126_poll); cphy->bmsr = 0; return cphy; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 67202b6f352e..4311ad9c84b2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5561,7 +5561,6 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) char name[IFNAMSIZ]; u32 devcap2; u16 flags; - int pos; /* If we want to instantiate Virtual Functions, then our * parent bridge's PCI-E needs to support Alternative Routing @@ -5569,9 +5568,8 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) * and above. */ pbridge = pdev->bus->self; - pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP); - pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags); - pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2); + pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags); + pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2); if ((flags & PCI_EXP_FLAGS_VERS) < 2 || !(devcap2 & PCI_EXP_DEVCAP2_ARI)) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 312599c6b35a..e447976bdd3e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -67,7 +67,8 @@ static struct ch_tc_pedit_fields pedits[] = { static struct ch_tc_flower_entry *allocate_flower_entry(void) { struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL); - spin_lock_init(&new->lock); + if (new) + spin_lock_init(&new->lock); return new; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 9dd5ed9a2965..f7fc553356f2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -7309,7 +7309,6 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, } else { unsigned int pack_align; unsigned int ingpad, ingpack; - unsigned int pcie_cap; /* T5 introduced the separation of the Free List Padding and * Packing Boundaries. Thus, we can select a smaller Padding @@ -7334,8 +7333,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, * multiple of the Maximum Payload Size. 
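The bnx2x hunk above inserts smp_rmb() after reading the hardware's completion index so the subsequent ring reads cannot be reordered ahead of it. The C11 analog on the consumer side is an acquire load paired with the producer's release store; in the driver the producer is the hardware, and the fence is smp_rmb(). A toy demonstration of the shape (single-threaded here for brevity):

    #include <stdatomic.h>
    #include <stdio.h>

    #define RING_SZ 16

    static unsigned ring[RING_SZ];
    static _Atomic unsigned hw_cons;

    /* Consumer: load the index first (acquire), then read entries up to it. */
    static void consume(unsigned *sw_cons)
    {
        unsigned cons = atomic_load_explicit(&hw_cons, memory_order_acquire);

        while (*sw_cons != cons) {
            printf("completed pkt %u\n", ring[*sw_cons % RING_SZ]);
            (*sw_cons)++;
        }
    }

    int main(void)
    {
        unsigned sw_cons = 0;

        ring[0] = 42;   /* producer fills the entry... */
        atomic_store_explicit(&hw_cons, 1, memory_order_release); /* ...then the index */
        consume(&sw_cons);
        return 0;
    }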
*/ pack_align = fl_align; - pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP); - if (pcie_cap) { + if (pci_is_pcie(adap->pdev)) { unsigned int mps, mps_log; u16 devctl; @@ -7343,9 +7341,8 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, * [bits 7:5] encodes sizes as powers of 2 starting at * 128 bytes. */ - pci_read_config_word(adap->pdev, - pcie_cap + PCI_EXP_DEVCTL, - &devctl); + pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, + &devctl); mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7; mps = 1 << mps_log; if (mps > pack_align) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index b7a246b33599..2edb86ec9fe9 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4698,8 +4698,13 @@ int be_update_queues(struct be_adapter *adapter) int status; if (netif_running(netdev)) { + /* be_tx_timeout() must not run concurrently with this + * function, synchronize with an already-running dev_watchdog + */ + netif_tx_lock_bh(netdev); /* device cannot transmit now, avoid dev_watchdog timeouts */ netif_carrier_off(netdev); + netif_tx_unlock_bh(netdev); be_close(netdev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 8ad5292eebbe..75329ab775a6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -43,7 +43,7 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */ HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */ HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */ - HLCGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */ + HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */ HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index a38ac7cfe16b..690b9990215c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -304,7 +304,7 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, memcpy(&msg_data[6], &vlan_tag, sizeof(u16)); return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), - HLCGE_MBX_PUSH_VLAN_INFO, vfid); + HCLGE_MBX_PUSH_VLAN_INFO, vfid); } static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index f60b80bd605e..6a96987bd8f0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -204,7 +204,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) case HCLGE_MBX_LINK_STAT_CHANGE: case HCLGE_MBX_ASSERTING_RESET: case HCLGE_MBX_LINK_STAT_MODE: - case HLCGE_MBX_PUSH_VLAN_INFO: + case HCLGE_MBX_PUSH_VLAN_INFO: /* set this mbx event as pending. This is required as we * might loose interrupt event when mbx task is busy * handling. 
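t4_fixup_host_params() above now reads Device Control through pcie_capability_read_word() and decodes MaxPayloadSize from bits 7:5, where a field value v encodes 2^(v+7) bytes (0 means 128). A quick standalone check of that decode, using an example register value:

    #include <stdio.h>
    #include <stdint.h>

    #define PCI_EXP_DEVCTL_PAYLOAD 0x00e0   /* Max_Payload_Size, bits 7:5 */

    int main(void)
    {
        uint16_t devctl = 0x2030;   /* example Device Control value, field = 1 */

        unsigned mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
        unsigned mps = 1u << mps_log;

        printf("MaxPayload = %u bytes (2^%u)\n", mps, mps_log);  /* 256 bytes */
        return 0;
    }

Switching to the pcie_capability_*() helpers also drops the manual pci_find_capability() offset lookup, the same simplification made in the igc hunks further down.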
This shall be cleared when mbx task just @@ -307,7 +307,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) hclgevf_reset_task_schedule(hdev); break; - case HLCGE_MBX_PUSH_VLAN_INFO: + case HCLGE_MBX_PUSH_VLAN_INFO: state = le16_to_cpu(msg_q[1]); vlan_info = &msg_q[1]; hclgevf_update_port_base_vlan_info(hdev, state, diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 93f3b4e6185b..aa9323e55406 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -3912,13 +3912,11 @@ void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) { struct igc_adapter *adapter = hw->back; - u16 cap_offset; - cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); - if (!cap_offset) + if (!pci_is_pcie(adapter->pdev)) return -IGC_ERR_CONFIG; - pci_read_config_word(adapter->pdev, cap_offset + reg, value); + pcie_capability_read_word(adapter->pdev, reg, value); return IGC_SUCCESS; } @@ -3926,13 +3924,11 @@ s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) { struct igc_adapter *adapter = hw->back; - u16 cap_offset; - cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); - if (!cap_offset) + if (!pci_is_pcie(adapter->pdev)) return -IGC_ERR_CONFIG; - pci_write_config_word(adapter->pdev, cap_offset + reg, *value); + pcie_capability_write_word(adapter->pdev, reg, *value); return IGC_SUCCESS; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 7245d287633d..7f747cb1a4f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -735,8 +735,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, list_add(&indr_priv->list, &rpriv->uplink_priv.tc_indr_block_priv_list); - block_cb = flow_block_cb_alloc(f->net, - mlx5e_rep_indr_setup_block_cb, + block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb, indr_priv, indr_priv, mlx5e_rep_indr_tc_block_unbind); if (IS_ERR(block_cb)) { @@ -753,7 +752,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, if (!indr_priv) return -ENOENT; - block_cb = flow_block_cb_lookup(f, + block_cb = flow_block_cb_lookup(f->block, mlx5e_rep_indr_setup_block_cb, indr_priv); if (!block_cb) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 4d34d42b3b0e..650638152bbc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1604,14 +1604,14 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, bool register_block = false; int err; - block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower, + block_cb = flow_block_cb_lookup(f->block, + mlxsw_sp_setup_tc_block_cb_flower, mlxsw_sp); if (!block_cb) { acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net); if (!acl_block) return -ENOMEM; - block_cb = flow_block_cb_alloc(f->net, - mlxsw_sp_setup_tc_block_cb_flower, + block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower, mlxsw_sp, acl_block, mlxsw_sp_tc_block_flower_release); if (IS_ERR(block_cb)) { @@ -1657,7 +1657,8 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port, struct flow_block_cb *block_cb; int err; - block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower, + 
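The mlx5 and mlxsw conversions here, like the ocelot and nfp ones further down, all track the same flow_block API change: flow_block_cb_alloc() lost its net argument and flow_block_cb_lookup() now takes the struct flow_block rather than the offload request. A condensed bind/unbind skeleton under the new signatures (all demo_* names hypothetical):

#include <linux/netdevice.h>
#include <net/flow_offload.h>

static LIST_HEAD(demo_block_cb_list);

static int demo_setup_block_cb(enum tc_setup_type type, void *type_data,
			       void *cb_priv)
{
	return -EOPNOTSUPP;	/* real classifier offload elided */
}

static int demo_setup_tc_block(void *priv, struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(demo_setup_block_cb, priv,
					  &demo_block_cb_list))
			return -EBUSY;
		block_cb = flow_block_cb_alloc(demo_setup_block_cb,
					       priv, priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &demo_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block,
						demo_setup_block_cb, priv);
		if (!block_cb)
			return -ENOENT;
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}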
block_cb = flow_block_cb_lookup(f->block, + mlxsw_sp_setup_tc_block_cb_flower, mlxsw_sp); if (!block_cb) return; @@ -1680,7 +1681,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, struct flow_block_offload *f) { struct flow_block_cb *block_cb; - tc_setup_cb_t *cb; + flow_setup_cb_t *cb; bool ingress; int err; @@ -1702,7 +1703,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, &mlxsw_sp_block_cb_list)) return -EBUSY; - block_cb = flow_block_cb_alloc(f->net, cb, mlxsw_sp_port, + block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port, mlxsw_sp_port, NULL); if (IS_ERR(block_cb)) return PTR_ERR(block_cb); @@ -1718,7 +1719,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, case FLOW_BLOCK_UNBIND: mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port, f, ingress); - block_cb = flow_block_cb_lookup(f, cb, mlxsw_sp_port); + block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port); if (!block_cb) return -ENOENT; diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 7aaddc09c185..59487d446a09 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -316,15 +316,14 @@ int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port, if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) return -EOPNOTSUPP; - block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower, - port); + block_cb = flow_block_cb_lookup(f->block, + ocelot_setup_tc_block_cb_flower, port); if (!block_cb) { port_block = ocelot_port_block_create(port); if (!port_block) return -ENOMEM; - block_cb = flow_block_cb_alloc(f->net, - ocelot_setup_tc_block_cb_flower, + block_cb = flow_block_cb_alloc(ocelot_setup_tc_block_cb_flower, port, port_block, ocelot_tc_block_unbind); if (IS_ERR(block_cb)) { @@ -351,8 +350,8 @@ void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port, { struct flow_block_cb *block_cb; - block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower, - port); + block_cb = flow_block_cb_lookup(f->block, + ocelot_setup_tc_block_cb_flower, port); if (!block_cb) return; diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c index 9e6464ffae5d..16a6db71ca5e 100644 --- a/drivers/net/ethernet/mscc/ocelot_tc.c +++ b/drivers/net/ethernet/mscc/ocelot_tc.c @@ -134,7 +134,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port, struct flow_block_offload *f) { struct flow_block_cb *block_cb; - tc_setup_cb_t *cb; + flow_setup_cb_t *cb; int err; netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n", @@ -156,7 +156,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port, if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list)) return -EBUSY; - block_cb = flow_block_cb_alloc(f->net, cb, port, port, NULL); + block_cb = flow_block_cb_alloc(cb, port, port, NULL); if (IS_ERR(block_cb)) return PTR_ERR(block_cb); @@ -169,7 +169,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port, list_add_tail(&block_cb->driver_list, f->driver_block_list); return 0; case FLOW_BLOCK_UNBIND: - block_cb = flow_block_cb_lookup(f, cb, port); + block_cb = flow_block_cb_lookup(f->block, cb, port); if (!block_cb) return -ENOENT; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index faa8ba012a37..e209f150c5f2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c 
@@ -1318,8 +1318,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, &nfp_block_cb_list)) return -EBUSY; - block_cb = flow_block_cb_alloc(f->net, - nfp_flower_setup_tc_block_cb, + block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb, repr, repr, NULL); if (IS_ERR(block_cb)) return PTR_ERR(block_cb); @@ -1328,7 +1327,8 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, list_add_tail(&block_cb->driver_list, &nfp_block_cb_list); return 0; case FLOW_BLOCK_UNBIND: - block_cb = flow_block_cb_lookup(f, nfp_flower_setup_tc_block_cb, + block_cb = flow_block_cb_lookup(f->block, + nfp_flower_setup_tc_block_cb, repr); if (!block_cb) return -ENOENT; @@ -1424,8 +1424,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, cb_priv->app = app; list_add(&cb_priv->list, &priv->indr_block_cb_priv); - block_cb = flow_block_cb_alloc(f->net, - nfp_flower_setup_indr_block_cb, + block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb, cb_priv, cb_priv, nfp_flower_setup_indr_tc_release); if (IS_ERR(block_cb)) { @@ -1442,7 +1441,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, if (!cb_priv) return -ENOENT; - block_cb = flow_block_cb_lookup(f, + block_cb = flow_block_cb_lookup(f->block, nfp_flower_setup_indr_block_cb, cb_priv); if (!block_cb) diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index f900fde448db..17c64e43d6c3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -530,9 +530,8 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1); /* Check atomic operations support in PCI configuration space. 
*/ - pci_read_config_dword(cdev->pdev, - cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2, - &pci_status_control); + pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2, + &pci_status_control); if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN) SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 0637c6752a78..6272115b2848 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -3251,9 +3251,9 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) ret = phy_read_paged(tp->phydev, 0x0a46, 0x13); if (ret & BIT(8)) - phy_modify_paged(tp->phydev, 0x0c41, 0x12, 0, BIT(1)); + phy_modify_paged(tp->phydev, 0x0c41, 0x15, 0, BIT(1)); else - phy_modify_paged(tp->phydev, 0x0c41, 0x12, BIT(1), 0); + phy_modify_paged(tp->phydev, 0x0c41, 0x15, BIT(1), 0); /* Enable PHY auto speed down */ phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index afdcc5664ea6..3544e1991579 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -836,7 +836,6 @@ int netvsc_recv_callback(struct net_device *net, if (unlikely(!skb)) { ++net_device_ctx->eth_stats.rx_no_memory; - rcu_read_unlock(); return NVSP_STAT_FAIL; } diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 2d816aadea79..e36c04c26866 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -517,7 +517,7 @@ static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value) static void sfp_hwmon_to_rx_power(long *value) { - *value = DIV_ROUND_CLOSEST(*value, 100); + *value = DIV_ROUND_CLOSEST(*value, 10); } static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset, diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 54edf8956a25..6e84328bdd40 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -165,23 +165,29 @@ static int vrf_ip6_local_out(struct net *net, struct sock *sk, static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, struct net_device *dev) { - const struct ipv6hdr *iph = ipv6_hdr(skb); + const struct ipv6hdr *iph; struct net *net = dev_net(skb->dev); - struct flowi6 fl6 = { - /* needed to match OIF rule */ - .flowi6_oif = dev->ifindex, - .flowi6_iif = LOOPBACK_IFINDEX, - .daddr = iph->daddr, - .saddr = iph->saddr, - .flowlabel = ip6_flowinfo(iph), - .flowi6_mark = skb->mark, - .flowi6_proto = iph->nexthdr, - .flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF, - }; + struct flowi6 fl6; int ret = NET_XMIT_DROP; struct dst_entry *dst; struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst; + if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr))) + goto err; + + iph = ipv6_hdr(skb); + + memset(&fl6, 0, sizeof(fl6)); + /* needed to match OIF rule */ + fl6.flowi6_oif = dev->ifindex; + fl6.flowi6_iif = LOOPBACK_IFINDEX; + fl6.daddr = iph->daddr; + fl6.saddr = iph->saddr; + fl6.flowlabel = ip6_flowinfo(iph); + fl6.flowi6_mark = skb->mark; + fl6.flowi6_proto = iph->nexthdr; + fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF; + dst = ip6_route_output(net, NULL, &fl6); if (dst == dst_null) goto err; @@ -237,21 +243,27 @@ static int vrf_ip_local_out(struct net *net, struct sock *sk, static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, struct net_device *vrf_dev) { - struct iphdr *ip4h = ip_hdr(skb); + struct iphdr *ip4h; int ret = NET_XMIT_DROP; - struct flowi4 fl4 = { - /* needed to match OIF rule */ - .flowi4_oif 
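The VRF hunks above and below guard both transmit paths with pskb_may_pull() before the L3 header is dereferenced, and only then re-read the header pointer, since pulling may reallocate the linear data. The guard in isolation (hypothetical function):

#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

static int demo_peek_v4_proto(struct sk_buff *skb)
{
	const struct iphdr *iph;

	/* ensure Ethernet + IPv4 headers sit in the linear area */
	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
		return -EINVAL;

	iph = ip_hdr(skb);	/* fetch only after the pull succeeded */
	return iph->protocol;
}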
= vrf_dev->ifindex, - .flowi4_iif = LOOPBACK_IFINDEX, - .flowi4_tos = RT_TOS(ip4h->tos), - .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF, - .flowi4_proto = ip4h->protocol, - .daddr = ip4h->daddr, - .saddr = ip4h->saddr, - }; + struct flowi4 fl4; struct net *net = dev_net(vrf_dev); struct rtable *rt; + if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr))) + goto err; + + ip4h = ip_hdr(skb); + + memset(&fl4, 0, sizeof(fl4)); + /* needed to match OIF rule */ + fl4.flowi4_oif = vrf_dev->ifindex; + fl4.flowi4_iif = LOOPBACK_IFINDEX; + fl4.flowi4_tos = RT_TOS(ip4h->tos); + fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF; + fl4.flowi4_proto = ip4h->protocol; + fl4.daddr = ip4h->daddr; + fl4.saddr = ip4h->saddr; + rt = ip_route_output_flow(net, &fl4, NULL); if (IS_ERR(rt)) goto err; diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index d436cc51dfd1..2fb4258941a5 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -177,6 +177,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = { .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG, .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, + .policy = wil_rf_sector_policy, .doit = wil_rf_sector_get_cfg }, { @@ -184,6 +185,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = { .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG, .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, + .policy = wil_rf_sector_policy, .doit = wil_rf_sector_set_cfg }, { @@ -192,6 +194,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = { QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR, .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, + .policy = wil_rf_sector_policy, .doit = wil_rf_sector_get_selected }, { @@ -200,6 +203,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = { QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR, .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, + .policy = wil_rf_sector_policy, .doit = wil_rf_sector_set_selected }, }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c index f6500899fc14..d07e7c7355d9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c @@ -112,6 +112,7 @@ const struct wiphy_vendor_command brcmf_vendor_cmds[] = { }, .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .policy = VENDOR_CMD_RAW_DATA, .doit = brcmf_cfg80211_vndr_cmds_dcmd_handler }, }; diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c index 5cf0b32c413b..e1bd344c4ebc 100644 --- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c +++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c @@ -163,6 +163,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = { .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = wlcore_vendor_cmd_smart_config_start, + .policy = wlcore_vendor_attr_policy, }, { .info = { @@ -172,6 +173,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = { .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = wlcore_vendor_cmd_smart_config_stop, + .policy = wlcore_vendor_attr_policy, }, { .info = { @@ -181,6 +183,7 
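The wireless hunks in this stretch back-fill the .policy member that struct wiphy_vendor_command now requires; cfg80211 validates vendor attributes against it, and VENDOR_CMD_RAW_DATA opts a command out of validation. One self-contained entry as a sketch (OUI and subcmd values hypothetical):

#include <net/cfg80211.h>

static int demo_vendor_doit(struct wiphy *wiphy, struct wireless_dev *wdev,
			    const void *data, int data_len)
{
	return 0;	/* command handling elided */
}

static const struct wiphy_vendor_command demo_vendor_cmds[] = {
	{
		.info = {
			.vendor_id = 0x001234,	/* hypothetical OUI */
			.subcmd = 1,
		},
		.flags = WIPHY_VENDOR_CMD_NEED_WDEV,
		.policy = VENDOR_CMD_RAW_DATA,	/* skip nla validation */
		.doit = demo_vendor_doit,
	},
};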
@@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = { .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = wlcore_vendor_cmd_smart_config_set_group_key, + .policy = wlcore_vendor_attr_policy, }, }; diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 62d00fffa4af..3508a79110c7 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -62,14 +62,14 @@ static ssize_t sector_size_store(struct device *dev, struct nd_btt *nd_btt = to_nd_btt(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); rc = nd_size_select_store(dev, buf, &nd_btt->lbasize, btt_lbasize_supported); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc ? rc : len; } @@ -91,11 +91,11 @@ static ssize_t uuid_store(struct device *dev, struct nd_btt *nd_btt = to_nd_btt(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); - device_unlock(dev); + nd_device_unlock(dev); return rc ? rc : len; } @@ -120,13 +120,13 @@ static ssize_t namespace_store(struct device *dev, struct nd_btt *nd_btt = to_nd_btt(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc; } @@ -138,14 +138,14 @@ static ssize_t size_show(struct device *dev, struct nd_btt *nd_btt = to_nd_btt(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); if (dev->driver) rc = sprintf(buf, "%llu\n", nd_btt->size); else { /* no size to convey if the btt instance is disabled */ rc = -ENXIO; } - device_unlock(dev); + nd_device_unlock(dev); return rc; } diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 2dca3034fee0..798c5c4aea9c 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -26,7 +26,7 @@ int nvdimm_major; static int nvdimm_bus_major; -static struct class *nd_class; +struct class *nd_class; static DEFINE_IDA(nd_ida); static int to_nd_device_type(struct device *dev) @@ -73,7 +73,7 @@ static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus) { nvdimm_bus_lock(&nvdimm_bus->dev); if (--nvdimm_bus->probe_active == 0) - wake_up(&nvdimm_bus->probe_wait); + wake_up(&nvdimm_bus->wait); nvdimm_bus_unlock(&nvdimm_bus->dev); } @@ -91,7 +91,10 @@ static int nvdimm_bus_probe(struct device *dev) dev->driver->name, dev_name(dev)); nvdimm_bus_probe_start(nvdimm_bus); + debug_nvdimm_lock(dev); rc = nd_drv->probe(dev); + debug_nvdimm_unlock(dev); + if (rc == 0) nd_region_probe_success(nvdimm_bus, dev); else @@ -113,8 +116,11 @@ static int nvdimm_bus_remove(struct device *dev) struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); int rc = 0; - if (nd_drv->remove) + if (nd_drv->remove) { + debug_nvdimm_lock(dev); rc = nd_drv->remove(dev); + debug_nvdimm_unlock(dev); + } nd_region_disable(nvdimm_bus, dev); dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name, @@ -140,7 +146,7 @@ static void nvdimm_bus_shutdown(struct device *dev) void nd_device_notify(struct device *dev, enum nvdimm_event event) { - device_lock(dev); + nd_device_lock(dev); if (dev->driver) { struct nd_device_driver *nd_drv; @@ -148,7 +154,7 @@ void nd_device_notify(struct device 
*dev, enum nvdimm_event event) if (nd_drv->notify) nd_drv->notify(dev, event); } - device_unlock(dev); + nd_device_unlock(dev); } EXPORT_SYMBOL(nd_device_notify); @@ -296,7 +302,7 @@ static void nvdimm_bus_release(struct device *dev) kfree(nvdimm_bus); } -static bool is_nvdimm_bus(struct device *dev) +bool is_nvdimm_bus(struct device *dev) { return dev->release == nvdimm_bus_release; } @@ -341,7 +347,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent, return NULL; INIT_LIST_HEAD(&nvdimm_bus->list); INIT_LIST_HEAD(&nvdimm_bus->mapping_list); - init_waitqueue_head(&nvdimm_bus->probe_wait); + init_waitqueue_head(&nvdimm_bus->wait); nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL); if (nvdimm_bus->id < 0) { kfree(nvdimm_bus); @@ -426,6 +432,9 @@ static int nd_bus_remove(struct device *dev) list_del_init(&nvdimm_bus->list); mutex_unlock(&nvdimm_bus_list_mutex); + wait_event(nvdimm_bus->wait, + atomic_read(&nvdimm_bus->ioctl_active) == 0); + nd_synchronize(); device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister); @@ -547,13 +556,38 @@ EXPORT_SYMBOL(nd_device_register); void nd_device_unregister(struct device *dev, enum nd_async_mode mode) { + bool killed; + switch (mode) { case ND_ASYNC: + /* + * In the async case this is being triggered with the + * device lock held and the unregistration work needs to + * be moved out of line iff this is thread has won the + * race to schedule the deletion. + */ + if (!kill_device(dev)) + return; + get_device(dev); async_schedule_domain(nd_async_device_unregister, dev, &nd_async_domain); break; case ND_SYNC: + /* + * In the sync case the device is being unregistered due + * to a state change of the parent. Claim the kill state + * to synchronize against other unregistration requests, + * or otherwise let the async path handle it if the + * unregistration was already queued. + */ + nd_device_lock(dev); + killed = kill_device(dev); + nd_device_unlock(dev); + + if (!killed) + return; + nd_synchronize(); device_unregister(dev); break; @@ -859,10 +893,12 @@ void wait_nvdimm_bus_probe_idle(struct device *dev) do { if (nvdimm_bus->probe_active == 0) break; - nvdimm_bus_unlock(&nvdimm_bus->dev); - wait_event(nvdimm_bus->probe_wait, + nvdimm_bus_unlock(dev); + nd_device_unlock(dev); + wait_event(nvdimm_bus->wait, nvdimm_bus->probe_active == 0); - nvdimm_bus_lock(&nvdimm_bus->dev); + nd_device_lock(dev); + nvdimm_bus_lock(dev); } while (true); } @@ -945,20 +981,19 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, int read_only, unsigned int ioctl_cmd, unsigned long arg) { struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; - static char out_env[ND_CMD_MAX_ENVELOPE]; - static char in_env[ND_CMD_MAX_ENVELOPE]; const struct nd_cmd_desc *desc = NULL; unsigned int cmd = _IOC_NR(ioctl_cmd); struct device *dev = &nvdimm_bus->dev; void __user *p = (void __user *) arg; + char *out_env = NULL, *in_env = NULL; const char *cmd_name, *dimm_name; u32 in_len = 0, out_len = 0; unsigned int func = cmd; unsigned long cmd_mask; struct nd_cmd_pkg pkg; int rc, i, cmd_rc; + void *buf = NULL; u64 buf_len = 0; - void *buf; if (nvdimm) { desc = nd_cmd_dimm_desc(cmd); @@ -989,7 +1024,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, case ND_CMD_ARS_START: case ND_CMD_CLEAR_ERROR: case ND_CMD_CALL: - dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n", + dev_dbg(dev, "'%s' command while read-only.\n", nvdimm ? 
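nd_device_unregister() above leans on kill_device() so that however many sync and async unregistration requests race, exactly one of them proceeds to device_unregister(). The claim-then-teardown idiom, reduced to a sketch:

#include <linux/device.h>

static void demo_unregister(struct device *dev)
{
	bool killed;

	device_lock(dev);
	killed = kill_device(dev);	/* true only for the winning caller */
	device_unlock(dev);

	if (!killed)
		return;		/* another path already owns the teardown */

	device_unregister(dev);
}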
nvdimm_cmd_name(cmd) : nvdimm_bus_cmd_name(cmd)); return -EPERM; @@ -998,6 +1033,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, } /* process an input envelope */ + in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); + if (!in_env) + return -ENOMEM; for (i = 0; i < desc->in_num; i++) { u32 in_size, copy; @@ -1005,14 +1043,17 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, if (in_size == UINT_MAX) { dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n", __func__, dimm_name, cmd_name, i); - return -ENXIO; + rc = -ENXIO; + goto out; } - if (in_len < sizeof(in_env)) - copy = min_t(u32, sizeof(in_env) - in_len, in_size); + if (in_len < ND_CMD_MAX_ENVELOPE) + copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size); else copy = 0; - if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) - return -EFAULT; + if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) { + rc = -EFAULT; + goto out; + } in_len += in_size; } @@ -1024,6 +1065,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, } /* process an output envelope */ + out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); + if (!out_env) { + rc = -ENOMEM; + goto out; + } + for (i = 0; i < desc->out_num; i++) { u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, (u32 *) in_env, (u32 *) out_env, 0); @@ -1032,15 +1079,18 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, if (out_size == UINT_MAX) { dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n", dimm_name, cmd_name, i); - return -EFAULT; + rc = -EFAULT; + goto out; } - if (out_len < sizeof(out_env)) - copy = min_t(u32, sizeof(out_env) - out_len, out_size); + if (out_len < ND_CMD_MAX_ENVELOPE) + copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size); else copy = 0; if (copy && copy_from_user(&out_env[out_len], - p + in_len + out_len, copy)) - return -EFAULT; + p + in_len + out_len, copy)) { + rc = -EFAULT; + goto out; + } out_len += out_size; } @@ -1048,19 +1098,23 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, if (buf_len > ND_IOCTL_MAX_BUFLEN) { dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name, cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN); - return -EINVAL; + rc = -EINVAL; + goto out; } buf = vmalloc(buf_len); - if (!buf) - return -ENOMEM; + if (!buf) { + rc = -ENOMEM; + goto out; + } if (copy_from_user(buf, p, buf_len)) { rc = -EFAULT; goto out; } - nvdimm_bus_lock(&nvdimm_bus->dev); + nd_device_lock(dev); + nvdimm_bus_lock(dev); rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf); if (rc) goto out_unlock; @@ -1075,39 +1129,24 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address, clear_err->cleared); } - nvdimm_bus_unlock(&nvdimm_bus->dev); if (copy_to_user(p, buf, buf_len)) rc = -EFAULT; - vfree(buf); - return rc; - - out_unlock: - nvdimm_bus_unlock(&nvdimm_bus->dev); - out: +out_unlock: + nvdimm_bus_unlock(dev); + nd_device_unlock(dev); +out: + kfree(in_env); + kfree(out_env); vfree(buf); return rc; } -static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - long id = (long) file->private_data; - int rc = -ENXIO, ro; - struct nvdimm_bus *nvdimm_bus; - - ro = ((file->f_flags & O_ACCMODE) == O_RDONLY); - mutex_lock(&nvdimm_bus_list_mutex); - list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) { - if (nvdimm_bus->id == id) { - rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg); - break; - } - 
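The __nd_ioctl() rework above swaps the shared static envelopes for per-call allocations and routes every failure through one cleanup tail, so no exit can leak a buffer. The resulting shape, stripped of the command logic (ENVELOPE_SZ stands in for ND_CMD_MAX_ENVELOPE):

#include <linux/slab.h>
#include <linux/vmalloc.h>

#define ENVELOPE_SZ 256

static int demo_ioctl_body(size_t buf_len)
{
	char *in_env, *out_env = NULL;
	void *buf = NULL;
	int rc = 0;

	in_env = kzalloc(ENVELOPE_SZ, GFP_KERNEL);
	if (!in_env)
		return -ENOMEM;

	out_env = kzalloc(ENVELOPE_SZ, GFP_KERNEL);
	if (!out_env) {
		rc = -ENOMEM;
		goto out;
	}

	buf = vmalloc(buf_len);
	if (!buf) {
		rc = -ENOMEM;
		goto out;
	}

	/* envelope parsing and command execution elided */
out:
	kfree(in_env);
	kfree(out_env);
	vfree(buf);
	return rc;
}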
} - mutex_unlock(&nvdimm_bus_list_mutex); - - return rc; -} +enum nd_ioctl_mode { + BUS_IOCTL, + DIMM_IOCTL, +}; static int match_dimm(struct device *dev, void *data) { @@ -1122,31 +1161,62 @@ static int match_dimm(struct device *dev, void *data) return 0; } -static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg, + enum nd_ioctl_mode mode) + { - int rc = -ENXIO, ro; - struct nvdimm_bus *nvdimm_bus; + struct nvdimm_bus *nvdimm_bus, *found = NULL; + long id = (long) file->private_data; + struct nvdimm *nvdimm = NULL; + int rc, ro; ro = ((file->f_flags & O_ACCMODE) == O_RDONLY); mutex_lock(&nvdimm_bus_list_mutex); list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) { - struct device *dev = device_find_child(&nvdimm_bus->dev, - file->private_data, match_dimm); - struct nvdimm *nvdimm; - - if (!dev) - continue; + if (mode == DIMM_IOCTL) { + struct device *dev; + + dev = device_find_child(&nvdimm_bus->dev, + file->private_data, match_dimm); + if (!dev) + continue; + nvdimm = to_nvdimm(dev); + found = nvdimm_bus; + } else if (nvdimm_bus->id == id) { + found = nvdimm_bus; + } - nvdimm = to_nvdimm(dev); - rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg); - put_device(dev); - break; + if (found) { + atomic_inc(&nvdimm_bus->ioctl_active); + break; + } } mutex_unlock(&nvdimm_bus_list_mutex); + if (!found) + return -ENXIO; + + nvdimm_bus = found; + rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg); + + if (nvdimm) + put_device(&nvdimm->dev); + if (atomic_dec_and_test(&nvdimm_bus->ioctl_active)) + wake_up(&nvdimm_bus->wait); + return rc; } +static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + return nd_ioctl(file, cmd, arg, BUS_IOCTL); +} + +static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + return nd_ioctl(file, cmd, arg, DIMM_IOCTL); +} + static int nd_open(struct inode *inode, struct file *file) { long minor = iminor(inode); @@ -1158,16 +1228,16 @@ static int nd_open(struct inode *inode, struct file *file) static const struct file_operations nvdimm_bus_fops = { .owner = THIS_MODULE, .open = nd_open, - .unlocked_ioctl = nd_ioctl, - .compat_ioctl = nd_ioctl, + .unlocked_ioctl = bus_ioctl, + .compat_ioctl = bus_ioctl, .llseek = noop_llseek, }; static const struct file_operations nvdimm_fops = { .owner = THIS_MODULE, .open = nd_open, - .unlocked_ioctl = nvdimm_ioctl, - .compat_ioctl = nvdimm_ioctl, + .unlocked_ioctl = dimm_ioctl, + .compat_ioctl = dimm_ioctl, .llseek = noop_llseek, }; diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index 5e1f060547bf..9204f1e9fd14 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -246,7 +246,7 @@ static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf, * * Enforce that uuids can only be changed while the device is disabled * (driver detached) - * LOCKING: expects device_lock() is held on entry + * LOCKING: expects nd_device_lock() is held on entry */ int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf, size_t len) @@ -347,15 +347,15 @@ static DEVICE_ATTR_RO(provider); static int flush_namespaces(struct device *dev, void *data) { - device_lock(dev); - device_unlock(dev); + nd_device_lock(dev); + nd_device_unlock(dev); return 0; } static int flush_regions_dimms(struct device *dev, void *data) { - device_lock(dev); - device_unlock(dev); + nd_device_lock(dev); + nd_device_unlock(dev); device_for_each_child(dev, NULL, flush_namespaces); return 
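With the unified nd_ioctl() above, each caller bumps ioctl_active while the bus-list mutex pins the bus, and the last caller out wakes anyone waiting; nd_bus_remove() can then wait_event() until no ioctl still references the bus. The handshake on its own:

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t demo_ioctl_active = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(demo_wait);

static long demo_ioctl(void)
{
	long rc;

	atomic_inc(&demo_ioctl_active);	/* taken under the list lock */
	rc = 0;				/* __nd_ioctl()-style work elided */
	if (atomic_dec_and_test(&demo_ioctl_active))
		wake_up(&demo_wait);
	return rc;
}

static void demo_bus_remove(void)
{
	/* block teardown until in-flight ioctls drain */
	wait_event(demo_wait, atomic_read(&demo_ioctl_active) == 0);
}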
0; } diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index dfecd6e17043..29a065e769ea 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -484,12 +484,12 @@ static ssize_t security_store(struct device *dev, * done while probing is idle and the DIMM is not in active use * in any region. */ - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); rc = __security_store(dev, buf, len); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc; } diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 2d8d7e554877..a16e52251a30 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -410,7 +410,7 @@ static ssize_t alt_name_store(struct device *dev, struct nd_region *nd_region = to_nd_region(dev->parent); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); rc = __alt_name_store(dev, buf, len); @@ -418,7 +418,7 @@ static ssize_t alt_name_store(struct device *dev, rc = nd_namespace_label_update(nd_region, dev); dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc < 0 ? rc : len; } @@ -1077,7 +1077,7 @@ static ssize_t size_store(struct device *dev, if (rc) return rc; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); rc = __size_store(dev, val); @@ -1103,7 +1103,7 @@ static ssize_t size_store(struct device *dev, dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc < 0 ? rc : len; } @@ -1286,7 +1286,7 @@ static ssize_t uuid_store(struct device *dev, } else return -ENXIO; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); if (to_ndns(dev)->claim) @@ -1302,7 +1302,7 @@ static ssize_t uuid_store(struct device *dev, dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc < 0 ? rc : len; } @@ -1376,7 +1376,7 @@ static ssize_t sector_size_store(struct device *dev, } else return -ENXIO; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); if (to_ndns(dev)->claim) rc = -EBUSY; @@ -1387,7 +1387,7 @@ static ssize_t sector_size_store(struct device *dev, dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote", buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc ? rc : len; } @@ -1502,9 +1502,9 @@ static ssize_t holder_show(struct device *dev, struct nd_namespace_common *ndns = to_ndns(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : ""); - device_unlock(dev); + nd_device_unlock(dev); return rc; } @@ -1541,7 +1541,7 @@ static ssize_t holder_class_store(struct device *dev, struct nd_region *nd_region = to_nd_region(dev->parent); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); rc = __holder_class_store(dev, buf); @@ -1549,7 +1549,7 @@ static ssize_t holder_class_store(struct device *dev, rc = nd_namespace_label_update(nd_region, dev); dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc < 0 ? 
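Every sysfs path converted in these nvdimm hunks keeps the same ordering: device lock first, then the bus reconfiguration lock, then wait for probing to go idle before touching shared state, releasing in reverse. A generic sketch of that discipline (the mutex stands in for nvdimm_bus_lock()):

#include <linux/device.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_reconfig_lock);

static ssize_t demo_store(struct device *dev, const char *buf, size_t len)
{
	device_lock(dev);		/* what nd_device_lock() wraps */
	mutex_lock(&demo_reconfig_lock);
	/* wait_nvdimm_bus_probe_idle(dev) would run here */

	/* configuration update elided */

	mutex_unlock(&demo_reconfig_lock);
	device_unlock(dev);
	return len;
}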
rc : len; } @@ -1560,7 +1560,7 @@ static ssize_t holder_class_show(struct device *dev, struct nd_namespace_common *ndns = to_ndns(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); if (ndns->claim_class == NVDIMM_CCLASS_NONE) rc = sprintf(buf, "\n"); else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) || @@ -1572,7 +1572,7 @@ static ssize_t holder_class_show(struct device *dev, rc = sprintf(buf, "dax\n"); else rc = sprintf(buf, "<unknown>\n"); - device_unlock(dev); + nd_device_unlock(dev); return rc; } @@ -1586,7 +1586,7 @@ static ssize_t mode_show(struct device *dev, char *mode; ssize_t rc; - device_lock(dev); + nd_device_lock(dev); claim = ndns->claim; if (claim && is_nd_btt(claim)) mode = "safe"; @@ -1599,7 +1599,7 @@ static ssize_t mode_show(struct device *dev, else mode = "raw"; rc = sprintf(buf, "%s\n", mode); - device_unlock(dev); + nd_device_unlock(dev); return rc; } @@ -1703,8 +1703,8 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev) * Flush any in-progess probes / removals in the driver * for the raw personality of this namespace. */ - device_lock(&ndns->dev); - device_unlock(&ndns->dev); + nd_device_lock(&ndns->dev); + nd_device_unlock(&ndns->dev); if (ndns->dev.driver) { dev_dbg(&ndns->dev, "is active, can't bind %s\n", dev_name(dev)); diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index 391e88de3a29..0ac52b6eb00e 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -9,6 +9,7 @@ #include <linux/sizes.h> #include <linux/mutex.h> #include <linux/nd.h> +#include "nd.h" extern struct list_head nvdimm_bus_list; extern struct mutex nvdimm_bus_list_mutex; @@ -17,10 +18,11 @@ extern struct workqueue_struct *nvdimm_wq; struct nvdimm_bus { struct nvdimm_bus_descriptor *nd_desc; - wait_queue_head_t probe_wait; + wait_queue_head_t wait; struct list_head list; struct device dev; int id, probe_active; + atomic_t ioctl_active; struct list_head mapping_list; struct mutex reconfig_mutex; struct badrange badrange; @@ -181,4 +183,71 @@ ssize_t nd_namespace_store(struct device *dev, struct nd_namespace_common **_ndns, const char *buf, size_t len); struct nd_pfn *to_nd_pfn_safe(struct device *dev); +bool is_nvdimm_bus(struct device *dev); + +#ifdef CONFIG_PROVE_LOCKING +extern struct class *nd_class; + +enum { + LOCK_BUS, + LOCK_NDCTL, + LOCK_REGION, + LOCK_DIMM = LOCK_REGION, + LOCK_NAMESPACE, + LOCK_CLAIM, +}; + +static inline void debug_nvdimm_lock(struct device *dev) +{ + if (is_nd_region(dev)) + mutex_lock_nested(&dev->lockdep_mutex, LOCK_REGION); + else if (is_nvdimm(dev)) + mutex_lock_nested(&dev->lockdep_mutex, LOCK_DIMM); + else if (is_nd_btt(dev) || is_nd_pfn(dev) || is_nd_dax(dev)) + mutex_lock_nested(&dev->lockdep_mutex, LOCK_CLAIM); + else if (dev->parent && (is_nd_region(dev->parent))) + mutex_lock_nested(&dev->lockdep_mutex, LOCK_NAMESPACE); + else if (is_nvdimm_bus(dev)) + mutex_lock_nested(&dev->lockdep_mutex, LOCK_BUS); + else if (dev->class && dev->class == nd_class) + mutex_lock_nested(&dev->lockdep_mutex, LOCK_NDCTL); + else + dev_WARN(dev, "unknown lock level\n"); +} + +static inline void debug_nvdimm_unlock(struct device *dev) +{ + mutex_unlock(&dev->lockdep_mutex); +} + +static inline void nd_device_lock(struct device *dev) +{ + device_lock(dev); + debug_nvdimm_lock(dev); +} + +static inline void nd_device_unlock(struct device *dev) +{ + debug_nvdimm_unlock(dev); + device_unlock(dev); +} +#else +static inline void nd_device_lock(struct device *dev) +{ + device_lock(dev); +} + +static inline void 
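debug_nvdimm_lock() above shadows device_lock() with a second mutex taken via mutex_lock_nested(), handing lockdep a distinct subclass per device type so legitimate parent/child nesting is not flagged while real inversions still are. The primitive in isolation:

#include <linux/mutex.h>

enum { DEMO_LOCK_PARENT, DEMO_LOCK_CHILD };	/* lockdep subclasses */

static DEFINE_MUTEX(demo_parent_lock);
static DEFINE_MUTEX(demo_child_lock);

static void demo_lock_pair(void)
{
	/*
	 * Different subclasses tell lockdep this nesting is intended;
	 * acquiring in the reverse order would still be reported.
	 */
	mutex_lock_nested(&demo_parent_lock, DEMO_LOCK_PARENT);
	mutex_lock_nested(&demo_child_lock, DEMO_LOCK_CHILD);

	mutex_unlock(&demo_child_lock);
	mutex_unlock(&demo_parent_lock);
}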
nd_device_unlock(struct device *dev) +{ + device_unlock(dev); +} + +static inline void debug_nvdimm_lock(struct device *dev) +{ +} + +static inline void debug_nvdimm_unlock(struct device *dev) +{ +} +#endif #endif /* __ND_CORE_H__ */ diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index df2bdbd22450..3e7b11cf1aae 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -67,7 +67,7 @@ static ssize_t mode_store(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc = 0; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); if (dev->driver) rc = -EBUSY; @@ -89,7 +89,7 @@ static ssize_t mode_store(struct device *dev, dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc ? rc : len; } @@ -132,14 +132,14 @@ static ssize_t align_store(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); rc = nd_size_select_store(dev, buf, &nd_pfn->align, nd_pfn_supported_alignments()); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc ? rc : len; } @@ -161,11 +161,11 @@ static ssize_t uuid_store(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); - device_unlock(dev); + nd_device_unlock(dev); return rc ? rc : len; } @@ -190,13 +190,13 @@ static ssize_t namespace_store(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? 
"" : "\n"); nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); return rc; } @@ -208,7 +208,7 @@ static ssize_t resource_show(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); if (dev->driver) { struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; u64 offset = __le64_to_cpu(pfn_sb->dataoff); @@ -222,7 +222,7 @@ static ssize_t resource_show(struct device *dev, /* no address to convey if the pfn instance is disabled */ rc = -ENXIO; } - device_unlock(dev); + nd_device_unlock(dev); return rc; } @@ -234,7 +234,7 @@ static ssize_t size_show(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); if (dev->driver) { struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; u64 offset = __le64_to_cpu(pfn_sb->dataoff); @@ -250,7 +250,7 @@ static ssize_t size_show(struct device *dev, /* no size to convey if the pfn instance is disabled */ rc = -ENXIO; } - device_unlock(dev); + nd_device_unlock(dev); return rc; } diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 2bf3acd69613..4c121dd03dd9 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -522,8 +522,8 @@ static int nd_pmem_remove(struct device *dev) nvdimm_namespace_detach_btt(to_nd_btt(dev)); else { /* - * Note, this assumes device_lock() context to not race - * nd_pmem_notify() + * Note, this assumes nd_device_lock() context to not + * race nd_pmem_notify() */ sysfs_put(pmem->bb_state); pmem->bb_state = NULL; diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c index ef46cc3a71ae..37bf8719a2a4 100644 --- a/drivers/nvdimm/region.c +++ b/drivers/nvdimm/region.c @@ -34,17 +34,6 @@ static int nd_region_probe(struct device *dev) if (rc) return rc; - rc = nd_region_register_namespaces(nd_region, &err); - if (rc < 0) - return rc; - - ndrd = dev_get_drvdata(dev); - ndrd->ns_active = rc; - ndrd->ns_count = rc + err; - - if (rc && err && rc == err) - return -ENODEV; - if (is_nd_pmem(&nd_region->dev)) { struct resource ndr_res; @@ -60,6 +49,17 @@ static int nd_region_probe(struct device *dev) nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res); } + rc = nd_region_register_namespaces(nd_region, &err); + if (rc < 0) + return rc; + + ndrd = dev_get_drvdata(dev); + ndrd->ns_active = rc; + ndrd->ns_count = rc + err; + + if (rc && err && rc == err) + return -ENODEV; + nd_region->btt_seed = nd_btt_create(nd_region); nd_region->pfn_seed = nd_pfn_create(nd_region); nd_region->dax_seed = nd_dax_create(nd_region); @@ -102,7 +102,7 @@ static int nd_region_remove(struct device *dev) nvdimm_bus_unlock(dev); /* - * Note, this assumes device_lock() context to not race + * Note, this assumes nd_device_lock() context to not race * nd_region_notify() */ sysfs_put(nd_region->bb_state); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 56f2227f192a..af30cbe7a8ea 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -331,7 +331,7 @@ static ssize_t set_cookie_show(struct device *dev, * the v1.1 namespace label cookie definition. To read all this * data we need to wait for probing to settle. 
*/ - device_lock(dev); + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); if (nd_region->ndr_mappings) { @@ -348,7 +348,7 @@ static ssize_t set_cookie_show(struct device *dev, } } nvdimm_bus_unlock(dev); - device_unlock(dev); + nd_device_unlock(dev); if (rc) return rc; @@ -424,10 +424,12 @@ static ssize_t available_size_show(struct device *dev, * memory nvdimm_bus_lock() is dropped, but that's userspace's * problem to not race itself. */ + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); available = nd_region_available_dpa(nd_region); nvdimm_bus_unlock(dev); + nd_device_unlock(dev); return sprintf(buf, "%llu\n", available); } @@ -439,10 +441,12 @@ static ssize_t max_available_extent_show(struct device *dev, struct nd_region *nd_region = to_nd_region(dev); unsigned long long available = 0; + nd_device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); available = nd_region_allocatable_dpa(nd_region); nvdimm_bus_unlock(dev); + nd_device_unlock(dev); return sprintf(buf, "%llu\n", available); } @@ -561,12 +565,12 @@ static ssize_t region_badblocks_show(struct device *dev, struct nd_region *nd_region = to_nd_region(dev); ssize_t rc; - device_lock(dev); + nd_device_lock(dev); if (dev->driver) rc = badblocks_show(&nd_region->bb, buf, 0); else rc = -ENXIO; - device_unlock(dev); + nd_device_unlock(dev); return rc; } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index cc09b81fc7f4..8f3fbe5ca937 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2311,17 +2311,15 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); } -static void __nvme_release_subsystem(struct nvme_subsystem *subsys) +static void nvme_release_subsystem(struct device *dev) { + struct nvme_subsystem *subsys = + container_of(dev, struct nvme_subsystem, dev); + ida_simple_remove(&nvme_subsystems_ida, subsys->instance); kfree(subsys); } -static void nvme_release_subsystem(struct device *dev) -{ - __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev)); -} - static void nvme_destroy_subsystem(struct kref *ref) { struct nvme_subsystem *subsys = @@ -2477,7 +2475,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) mutex_lock(&nvme_subsystems_lock); found = __nvme_find_get_subsystem(subsys->subnqn); if (found) { - __nvme_release_subsystem(subsys); + put_device(&subsys->dev); subsys = found; if (!nvme_validate_cntlid(subsys, ctrl, id)) { diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index a9a927677970..4f0d0d12744e 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -12,11 +12,6 @@ module_param(multipath, bool, 0444); MODULE_PARM_DESC(multipath, "turn on native support for multiple controllers per subsystem"); -inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) -{ - return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3)); -} - /* * If multipathing is enabled we need to always use the subsystem instance * number for numbering our devices to avoid conflicts between subsystems that @@ -622,7 +617,8 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) { int error; - if (!nvme_ctrl_use_ana(ctrl)) + /* check if multipath is enabled and we have the capability */ + if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3))) return 0; ctrl->anacap = id->anacap; diff --git a/drivers/nvme/host/nvme.h 
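The nvme change above folds subsystem teardown into the struct device ->release callback, so the duplicate-subsystem error path can simply put_device() instead of freeing behind the refcount's back. The idiom, sketched with hypothetical names:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_subsys {
	struct device dev;
	int instance;
};

static void demo_release(struct device *dev)
{
	struct demo_subsys *s = container_of(dev, struct demo_subsys, dev);

	kfree(s);	/* all teardown lives here, and only here */
}

static struct demo_subsys *demo_subsys_create(void)
{
	struct demo_subsys *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return NULL;
	device_initialize(&s->dev);
	s->dev.release = demo_release;
	return s;
}

/* on any error after device_initialize(): put_device(&s->dev), not kfree() */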
b/drivers/nvme/host/nvme.h index 716a876119c8..26b563f9985b 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -485,7 +485,11 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[]; extern const struct block_device_operations nvme_ns_head_ops; #ifdef CONFIG_NVME_MULTIPATH -bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl); +static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) +{ + return ctrl->ana_log_buf != NULL; +} + void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, struct nvme_ctrl *ctrl, int *flags); void nvme_failover_req(struct request *req); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index bb970ca82517..db160cee42ad 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2254,9 +2254,7 @@ static int nvme_dev_add(struct nvme_dev *dev) if (!dev->ctrl.tagset) { dev->tagset.ops = &nvme_mq_ops; dev->tagset.nr_hw_queues = dev->online_queues - 1; - dev->tagset.nr_maps = 1; /* default */ - if (dev->io_queues[HCTX_TYPE_READ]) - dev->tagset.nr_maps++; + dev->tagset.nr_maps = 2; /* default + read */ if (dev->io_queues[HCTX_TYPE_POLL]) dev->tagset.nr_maps++; dev->tagset.timeout = NVME_IO_TIMEOUT; @@ -3029,6 +3027,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_LIGHTNVM, }, { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */ .driver_data = NVME_QUIRK_LIGHTNVM, }, + { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 9cdf14b9aaab..223d617ecfe1 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -24,6 +24,7 @@ #include <linux/debugfs.h> #include <linux/serial_core.h> #include <linux/sysfs.h> +#include <linux/random.h> #include <asm/setup.h> /* for COMMAND_LINE_SIZE */ #include <asm/page.h> @@ -1044,6 +1045,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, { int l; const char *p; + const void *rng_seed; pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); @@ -1078,6 +1080,18 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, pr_debug("Command line is: %s\n", (char*)data); + rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l); + if (rng_seed && l > 0) { + add_bootloader_randomness(rng_seed, l); + + /* try to clear seed so it won't be found. 
*/ + fdt_nop_property(initial_boot_params, node, "rng-seed"); + + /* update CRC check value */ + of_fdt_crc32 = crc32_be(~0, initial_boot_params, + fdt_totalsize(initial_boot_params)); + } + /* break now */ return 1; } diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 2d06b8095a19..df352b334ea7 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -723,8 +723,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd, cpu_pm_pmu_setup(armpmu, cmd); break; case CPU_PM_EXIT: - cpu_pm_pmu_setup(armpmu, cmd); case CPU_PM_ENTER_FAILED: + cpu_pm_pmu_setup(armpmu, cmd); armpmu->start(armpmu); break; default: diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c index 48d6f0d87583..83ed1fbf73cf 100644 --- a/drivers/platform/olpc/olpc-xo175-ec.c +++ b/drivers/platform/olpc/olpc-xo175-ec.c @@ -736,6 +736,12 @@ static const struct of_device_id olpc_xo175_ec_of_match[] = { }; MODULE_DEVICE_TABLE(of, olpc_xo175_ec_of_match); +static const struct spi_device_id olpc_xo175_ec_id_table[] = { + { "xo1.75-ec", 0 }, + {} +}; +MODULE_DEVICE_TABLE(spi, olpc_xo175_ec_id_table); + static struct spi_driver olpc_xo175_ec_spi_driver = { .driver = { .name = "olpc-xo175-ec", diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 235c0b89f824..c510d0d72475 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -812,6 +812,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map), INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map), INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map), + INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map), {} }; diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c index b0d3110ae378..e4c68efac0c2 100644 --- a/drivers/platform/x86/pcengines-apuv2.c +++ b/drivers/platform/x86/pcengines-apuv2.c @@ -93,7 +93,7 @@ static struct gpiod_lookup_table gpios_led_table = { static struct gpio_keys_button apu2_keys_buttons[] = { { - .code = KEY_SETUP, + .code = KEY_RESTART, .active_low = 1, .desc = "front button", .type = EV_KEY, @@ -255,6 +255,4 @@ MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table); MODULE_ALIAS("platform:pcengines-apuv2"); -MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME); -MODULE_SOFTDEP("pre: platform:leds-gpio"); -MODULE_SOFTDEP("pre: platform:gpio_keys_polled"); +MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME " platform:leds-gpio platform:gpio_keys_polled"); diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 9fd6dd342169..6df481896b5f 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -1454,7 +1454,7 @@ static void __exit rapl_exit(void) unregister_pm_notifier(&rapl_pm_notifier); } -module_init(rapl_init); +fs_initcall(rapl_init); module_exit(rapl_exit); MODULE_DESCRIPTION("Intel Runtime Average Power Limit (RAPL) common code"); diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index 540e8aafc990..f808c5fa9838 100644 --- a/drivers/powercap/powercap_sys.c +++ b/drivers/powercap/powercap_sys.c @@ -671,7 +671,7 @@ static int __init powercap_init(void) return class_register(&powercap_class); } -device_initcall(powercap_init); +fs_initcall(powercap_init); MODULE_DESCRIPTION("PowerCap sysfs Driver"); MODULE_AUTHOR("Srinivas 
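The two powercap hunks promote registration from module_init()/device_initcall() to fs_initcall() so the class exists before any device_initcall-level consumer probes; when the code is built as a module the levels collapse and load order governs instead. Minimal shape:

#include <linux/init.h>
#include <linux/module.h>

static int __init demo_core_init(void)
{
	/* register the class other drivers hang devices off (elided) */
	return 0;
}
/* fs_initcall runs before device_initcall in the built-in boot order */
fs_initcall(demo_core_init);

static void __exit demo_core_exit(void)
{
}
module_exit(demo_core_exit);

MODULE_LICENSE("GPL");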
Pandruvada <srinivas.pandruvada@linux.intel.com>"); diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index b9ce93e9df89..99f86612f775 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -383,6 +383,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr) char msg_format; char msg_no; + /* + * intrc values ENODEV, ENOLINK and EPERM + * will be optained from sleep_on to indicate that no + * IO operation can be started + */ + if (cqr->intrc == -ENODEV) + return 1; + + if (cqr->intrc == -ENOLINK) + return 1; + + if (cqr->intrc == -EPERM) + return 1; + sense = dasd_get_sense(&cqr->irb); if (!sense) return 0; @@ -447,12 +461,8 @@ static int read_unit_address_configuration(struct dasd_device *device, lcu->flags &= ~NEED_UAC_UPDATE; spin_unlock_irqrestore(&lcu->lock, flags); - do { - rc = dasd_sleep_on(cqr); - if (rc && suborder_not_supported(cqr)) - return -EOPNOTSUPP; - } while (rc && (cqr->retries > 0)); - if (rc) { + rc = dasd_sleep_on(cqr); + if (rc && !suborder_not_supported(cqr)) { spin_lock_irqsave(&lcu->lock, flags); lcu->flags |= NEED_UAC_UPDATE; spin_unlock_irqrestore(&lcu->lock, flags); diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 8c9d412b6d33..e7cf0a1d4f71 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -398,6 +398,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm, } if (dstat == 0x08) break; + /* else, fall through */ case 0x04: /* Device end interrupt. */ if ((raw = req->info) == NULL) diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 8d3370da2dfc..3e0b2f63a9d2 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c @@ -677,6 +677,7 @@ tape_generic_remove(struct ccw_device *cdev) switch (device->tape_state) { case TS_INIT: tape_state_set(device, TS_NOT_OPER); + /* fallthrough */ case TS_NOT_OPER: /* * Nothing to do. 
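The s390 hunks in this stretch annotate deliberate case fall-through so builds with -Wimplicit-fallthrough stay clean; kernels of this vintage use the comment form rather than the later fallthrough pseudo-keyword. Trivial illustration:

static int demo_classify(int state)
{
	int handled = 0;

	switch (state) {
	case 0:
		handled = 1;
		/* fallthrough */
	case 1:
		handled++;
		break;
	default:
		break;
	}
	return handled;
}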
@@ -949,6 +950,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request) break; if (device->tape_state == TS_UNUSED) break; + /* fallthrough */ default: if (device->tape_state == TS_BLKUSE) break; @@ -1116,6 +1118,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) case -ETIMEDOUT: DBF_LH(1, "(%08x): Request timed out\n", device->cdev_id); + /* fallthrough */ case -EIO: __tape_end_request(device, request, -EIO); break; diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 730c4e68094b..4142c85e77d8 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -319,9 +319,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit, int retries = 0, cc; unsigned long laob = 0; - WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) || - !q->u.out.use_cq)); - if (q->u.out.use_cq && aob != 0) { + if (aob) { fc = QDIO_SIGA_WRITEQ; laob = aob; } @@ -621,9 +619,6 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, { unsigned long phys_aob = 0; - if (!q->use_cq) - return 0; - if (!q->aobs[bufnr]) { struct qaob *aob = qdio_allocate_aob(); q->aobs[bufnr] = aob; @@ -1308,6 +1303,8 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr) for_each_output_queue(irq_ptr, q, i) { if (use_cq) { + if (multicast_outbound(q)) + continue; if (qdio_enable_async_operation(&q->u.out) < 0) { use_cq = 0; continue; @@ -1553,18 +1550,19 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, /* One SIGA-W per buffer required for unicast HSI */ WARN_ON_ONCE(count > 1 && !multicast_outbound(q)); - phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr); + if (q->u.out.use_cq) + phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr); rc = qdio_kick_outbound_q(q, phys_aob); } else if (need_siga_sync(q)) { rc = qdio_siga_sync_q(q); + } else if (count < QDIO_MAX_BUFFERS_PER_Q && + get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 && + state == SLSB_CU_OUTPUT_PRIMED) { + /* The previous buffer is not processed yet, tack on. 
*/ + qperf_inc(q, fast_requeue); } else { - /* try to fast requeue buffers */ - get_buf_state(q, prev_buf(bufnr), &state, 0); - if (state != SLSB_CU_OUTPUT_PRIMED) - rc = qdio_kick_outbound_q(q, 0); - else - qperf_inc(q, fast_requeue); + rc = qdio_kick_outbound_q(q, 0); } /* in case of SIGA errors we must process the error immediately */ diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c index 8c1d2357ef5b..7a838e3d7c0f 100644 --- a/drivers/s390/cio/vfio_ccw_async.c +++ b/drivers/s390/cio/vfio_ccw_async.c @@ -70,7 +70,7 @@ static void vfio_ccw_async_region_release(struct vfio_ccw_private *private, } -const struct vfio_ccw_regops vfio_ccw_async_region_ops = { +static const struct vfio_ccw_regops vfio_ccw_async_region_ops = { .read = vfio_ccw_async_region_read, .write = vfio_ccw_async_region_write, .release = vfio_ccw_async_region_release, diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index 1d4c893ead23..3645d1720c4b 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -72,8 +72,10 @@ static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len) sizeof(*pa->pa_iova_pfn) + sizeof(*pa->pa_pfn), GFP_KERNEL); - if (unlikely(!pa->pa_iova_pfn)) + if (unlikely(!pa->pa_iova_pfn)) { + pa->pa_nr = 0; return -ENOMEM; + } pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr; pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT; @@ -421,7 +423,7 @@ static int ccwchain_loop_tic(struct ccwchain *chain, static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp) { struct ccwchain *chain; - int len; + int len, ret; /* Copy 2K (the most we support today) of possible CCWs */ len = copy_from_iova(cp->mdev, cp->guest_cp, cda, @@ -448,7 +450,12 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp) memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1)); /* Loop for tics on this new chain. */ - return ccwchain_loop_tic(chain, cp); + ret = ccwchain_loop_tic(chain, cp); + + if (ret) + ccwchain_free(chain); + + return ret; } /* Loop for TICs. */ @@ -642,17 +649,16 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) /* Build a ccwchain for the first CCW segment */ ret = ccwchain_handle_ccw(orb->cmd.cpa, cp); - if (ret) - cp_free(cp); - - /* It is safe to force: if not set but idals used - * ccwchain_calc_length returns an error. - */ - cp->orb.cmd.c64 = 1; - if (!ret) + if (!ret) { cp->initialized = true; + /* It is safe to force: if it was not set but idals used + * ccwchain_calc_length would have returned an error. 
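pfn_array_alloc() above now zeroes its element count when the backing allocation fails, so a subsequent free/unpin pass sees a count consistent with the (absent) storage. The defensive pattern generally:

#include <linux/slab.h>

struct demo_array {
	unsigned long *items;
	int nr;
};

static int demo_array_alloc(struct demo_array *a, int nr)
{
	a->nr = nr;
	a->items = kcalloc(nr, sizeof(*a->items), GFP_KERNEL);
	if (!a->items) {
		a->nr = 0;	/* keep the count honest on failure */
		return -ENOMEM;
	}
	return 0;
}

static void demo_array_free(struct demo_array *a)
{
	kfree(a->items);	/* nr == 0 guarantees no element walk */
	a->items = NULL;
	a->nr = 0;
}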
+ */ + cp->orb.cmd.c64 = 1; + } + return ret; } diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 2b90a5ecaeb9..9208c0e56c33 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -88,7 +88,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)); if (scsw_is_solicited(&irb->scsw)) { cp_update_scsw(&private->cp, &irb->scsw); - if (is_final) + if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) cp_free(&private->cp); } mutex_lock(&private->io_mutex); diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 5ea83dc4f1d7..dad2be333d82 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -152,6 +152,7 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq) ap_msg->receive(aq, ap_msg, aq->reply); break; } + /* fall through */ case AP_RESPONSE_NO_PENDING_REPLY: if (!status.queue_empty || aq->queue_count <= 0) break; diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 12fe9deb265e..a36251d138fb 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -801,10 +801,7 @@ static int convert_response_ica(struct zcrypt_queue *zq, if (msg->cprbx.cprb_ver_id == 0x02) return convert_type86_ica(zq, reply, outputdata, outputdatalength); - /* - * Fall through, no break, incorrect cprb version is an unknown - * response - */ + /* fall through - wrong cprb version is an unknown response */ default: /* Unknown response type, this should NEVER EVER happen */ zq->online = 0; pr_err("Cryptographic device %02x.%04x failed and was set offline\n", @@ -837,10 +834,7 @@ static int convert_response_xcrb(struct zcrypt_queue *zq, } if (msg->cprbx.cprb_ver_id == 0x02) return convert_type86_xcrb(zq, reply, xcRB); - /* - * Fall through, no break, incorrect cprb version is an unknown - * response - */ + /* fall through - wrong cprb version is an unknown response */ default: /* Unknown response type, this should NEVER EVER happen */ xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ zq->online = 0; @@ -870,7 +864,7 @@ static int convert_response_ep11_xcrb(struct zcrypt_queue *zq, return convert_error(zq, reply); if (msg->cprbx.cprb_ver_id == 0x04) return convert_type86_ep11_xcrb(zq, reply, xcRB); - /* Fall through, no break, incorrect cprb version is an unknown resp.*/ + /* fall through - wrong cprb version is an unknown resp */ default: /* Unknown response type, this should NEVER EVER happen */ zq->online = 0; pr_err("Cryptographic device %02x.%04x failed and was set offline\n", @@ -900,10 +894,7 @@ static int convert_response_rng(struct zcrypt_queue *zq, return -EINVAL; if (msg->cprbx.cprb_ver_id == 0x02) return convert_type86_rng(zq, reply, data); - /* - * Fall through, no break, incorrect cprb version is an unknown - * response - */ + /* fall through - wrong cprb version is an unknown response */ default: /* Unknown response type, this should NEVER EVER happen */ zq->online = 0; pr_err("Cryptographic device %02x.%04x failed and was set offline\n", diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 1a55e5942d36..957889a42d2e 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -145,6 +145,8 @@ struct airq_info { struct airq_iv *aiv; }; static struct airq_info *airq_areas[MAX_AIRQ_AREAS]; +static DEFINE_MUTEX(airq_areas_lock); + static u8 *summary_indicators; static inline u8 
*get_summary_indicator(struct airq_info *info) @@ -265,9 +267,11 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, unsigned long bit, flags; for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) { + mutex_lock(&airq_areas_lock); if (!airq_areas[i]) airq_areas[i] = new_airq_info(i); info = airq_areas[i]; + mutex_unlock(&airq_areas_lock); if (!info) return 0; write_lock_irqsave(&info->lock, flags); diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 75f66f8ad3ea..1b92f3c19ff3 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1523,10 +1523,10 @@ config SCSI_VIRTIO source "drivers/scsi/csiostor/Kconfig" -endif # SCSI_LOWLEVEL - source "drivers/scsi/pcmcia/Kconfig" +endif # SCSI_LOWLEVEL + source "drivers/scsi/device_handler/Kconfig" endmenu diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index f0066f8a1786..4971104b1817 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -40,6 +40,7 @@ #define ALUA_FAILOVER_TIMEOUT 60 #define ALUA_FAILOVER_RETRIES 5 #define ALUA_RTPG_DELAY_MSECS 5 +#define ALUA_RTPG_RETRY_DELAY 2 /* device handler flags */ #define ALUA_OPTIMIZE_STPG 0x01 @@ -682,7 +683,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) case SCSI_ACCESS_STATE_TRANSITIONING: if (time_before(jiffies, pg->expiry)) { /* State transition, retry */ - pg->interval = 2; + pg->interval = ALUA_RTPG_RETRY_DELAY; err = SCSI_DH_RETRY; } else { struct alua_dh_data *h; @@ -807,6 +808,8 @@ static void alua_rtpg_work(struct work_struct *work) spin_lock_irqsave(&pg->lock, flags); pg->flags &= ~ALUA_PG_RUNNING; pg->flags |= ALUA_PG_RUN_RTPG; + if (!pg->interval) + pg->interval = ALUA_RTPG_RETRY_DELAY; spin_unlock_irqrestore(&pg->lock, flags); queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ); @@ -818,6 +821,8 @@ static void alua_rtpg_work(struct work_struct *work) spin_lock_irqsave(&pg->lock, flags); if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) { pg->flags &= ~ALUA_PG_RUNNING; + if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG)) + pg->interval = ALUA_RTPG_RETRY_DELAY; pg->flags |= ALUA_PG_RUN_RTPG; spin_unlock_irqrestore(&pg->lock, flags); queue_delayed_work(kaluad_wq, &pg->rtpg_work, diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 590ec8009f52..1791a393795d 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -1019,7 +1019,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct fcoe_fcf *fcf; struct fcoe_fcf new; - unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV); + unsigned long sol_tov = msecs_to_jiffies(FCOE_CTLR_SOL_TOV); int first = 0; int mtu_valid; int found = 0; @@ -2005,7 +2005,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); */ static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata) { - return (struct fcoe_rport *)(rdata + 1); + return container_of(rdata, struct fcoe_rport, rdata); } /** @@ -2269,7 +2269,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip) */ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, struct sk_buff *skb, - struct fc_rport_priv *rdata) + struct fcoe_rport *frport) { struct fip_header *fiph; struct fip_desc *desc = NULL; @@ -2277,16 +2277,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, struct fip_wwn_desc *wwn = NULL; struct fip_vn_desc *vn = NULL; struct fip_size_desc *size = NULL; - struct fcoe_rport *frport; size_t 
rlen; size_t dlen; u32 desc_mask = 0; u32 dtype; u8 sub; - memset(rdata, 0, sizeof(*rdata) + sizeof(*frport)); - frport = fcoe_ctlr_rport(rdata); - fiph = (struct fip_header *)skb->data; frport->flags = ntohs(fiph->fip_flags); @@ -2349,15 +2345,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, if (dlen != sizeof(struct fip_wwn_desc)) goto len_err; wwn = (struct fip_wwn_desc *)desc; - rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); + frport->rdata.ids.node_name = + get_unaligned_be64(&wwn->fd_wwn); break; case FIP_DT_VN_ID: if (dlen != sizeof(struct fip_vn_desc)) goto len_err; vn = (struct fip_vn_desc *)desc; memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN); - rdata->ids.port_id = ntoh24(vn->fd_fc_id); - rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn); + frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id); + frport->rdata.ids.port_name = + get_unaligned_be64(&vn->fd_wwpn); break; case FIP_DT_FC4F: if (dlen != sizeof(struct fip_fc4_feat)) @@ -2403,16 +2401,14 @@ static void fcoe_ctlr_vn_send_claim(struct fcoe_ctlr *fip) /** * fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request. * @fip: The FCoE controller - * @rdata: parsed remote port with frport from the probe request + * @frport: parsed FCoE rport from the probe request * * Called with ctlr_mutex held. */ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, - struct fc_rport_priv *rdata) + struct fcoe_rport *frport) { - struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); - - if (rdata->ids.port_id != fip->port_id) + if (frport->rdata.ids.port_id != fip->port_id) return; switch (fip->state) { @@ -2432,7 +2428,7 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, * Probe's REC bit is not set. * If we don't reply, we will change our address. */ - if (fip->lp->wwpn > rdata->ids.port_name && + if (fip->lp->wwpn > frport->rdata.ids.port_name && !(frport->flags & FIP_FL_REC_OR_P2P)) { LIBFCOE_FIP_DBG(fip, "vn_probe_req: " "port_id collision\n"); @@ -2456,14 +2452,14 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, /** * fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply. * @fip: The FCoE controller - * @rdata: parsed remote port with frport from the probe request + * @frport: parsed FCoE rport from the probe request * * Called with ctlr_mutex held. */ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip, - struct fc_rport_priv *rdata) + struct fcoe_rport *frport) { - if (rdata->ids.port_id != fip->port_id) + if (frport->rdata.ids.port_id != fip->port_id) return; switch (fip->state) { case FIP_ST_VNMP_START: @@ -2486,11 +2482,11 @@ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip, /** * fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply. * @fip: The FCoE controller - * @new: newly-parsed remote port with frport as a template for new rdata + * @new: newly-parsed FCoE rport as a template for new rdata * * Called with ctlr_mutex held. 
*/ -static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new) +static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fcoe_rport *new) { struct fc_lport *lport = fip->lp; struct fc_rport_priv *rdata; @@ -2498,7 +2494,7 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new) struct fcoe_rport *frport; u32 port_id; - port_id = new->ids.port_id; + port_id = new->rdata.ids.port_id; if (port_id == fip->port_id) return; @@ -2515,22 +2511,28 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new) rdata->disc_id = lport->disc.disc_id; ids = &rdata->ids; - if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) || - (ids->node_name != -1 && ids->node_name != new->ids.node_name)) { + if ((ids->port_name != -1 && + ids->port_name != new->rdata.ids.port_name) || + (ids->node_name != -1 && + ids->node_name != new->rdata.ids.node_name)) { mutex_unlock(&rdata->rp_mutex); LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id); fc_rport_logoff(rdata); mutex_lock(&rdata->rp_mutex); } - ids->port_name = new->ids.port_name; - ids->node_name = new->ids.node_name; + ids->port_name = new->rdata.ids.port_name; + ids->node_name = new->rdata.ids.node_name; mutex_unlock(&rdata->rp_mutex); frport = fcoe_ctlr_rport(rdata); LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n", port_id, frport->fcoe_len ? "old" : "new", rdata->rp_state); - *frport = *fcoe_ctlr_rport(new); + frport->fcoe_len = new->fcoe_len; + frport->flags = new->flags; + frport->login_count = new->login_count; + memcpy(frport->enode_mac, new->enode_mac, ETH_ALEN); + memcpy(frport->vn_mac, new->vn_mac, ETH_ALEN); frport->time = 0; } @@ -2562,16 +2564,14 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac) /** * fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification * @fip: The FCoE controller - * @new: newly-parsed remote port with frport as a template for new rdata + * @new: newly-parsed FCoE rport as a template for new rdata * * Called with ctlr_mutex held. 
*/ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, - struct fc_rport_priv *new) + struct fcoe_rport *new) { - struct fcoe_rport *frport = fcoe_ctlr_rport(new); - - if (frport->flags & FIP_FL_REC_OR_P2P) { + if (new->flags & FIP_FL_REC_OR_P2P) { LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n"); fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); return; @@ -2580,7 +2580,7 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, case FIP_ST_VNMP_START: case FIP_ST_VNMP_PROBE1: case FIP_ST_VNMP_PROBE2: - if (new->ids.port_id == fip->port_id) { + if (new->rdata.ids.port_id == fip->port_id) { LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " "restart, state %d\n", fip->state); @@ -2589,8 +2589,8 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, break; case FIP_ST_VNMP_CLAIM: case FIP_ST_VNMP_UP: - if (new->ids.port_id == fip->port_id) { - if (new->ids.port_name > fip->lp->wwpn) { + if (new->rdata.ids.port_id == fip->port_id) { + if (new->rdata.ids.port_name > fip->lp->wwpn) { LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " "restart, port_id collision\n"); fcoe_ctlr_vn_restart(fip); @@ -2602,15 +2602,16 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, break; } LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n", - new->ids.port_id); - fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac, - min((u32)frport->fcoe_len, + new->rdata.ids.port_id); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, new->enode_mac, + min((u32)new->fcoe_len, fcoe_ctlr_fcoe_size(fip))); fcoe_ctlr_vn_add(fip, new); break; default: LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " - "ignoring claim from %x\n", new->ids.port_id); + "ignoring claim from %x\n", + new->rdata.ids.port_id); break; } } @@ -2618,15 +2619,15 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, /** * fcoe_ctlr_vn_claim_resp() - handle received Claim Response * @fip: The FCoE controller that received the frame - * @new: newly-parsed remote port with frport from the Claim Response + * @new: newly-parsed FCoE rport from the Claim Response * * Called with ctlr_mutex held. */ static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip, - struct fc_rport_priv *new) + struct fcoe_rport *new) { LIBFCOE_FIP_DBG(fip, "claim resp from from rport %x - state %s\n", - new->ids.port_id, fcoe_ctlr_state(fip->state)); + new->rdata.ids.port_id, fcoe_ctlr_state(fip->state)); if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM) fcoe_ctlr_vn_add(fip, new); } @@ -2634,28 +2635,28 @@ static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip, /** * fcoe_ctlr_vn_beacon() - handle received beacon. * @fip: The FCoE controller that received the frame - * @new: newly-parsed remote port with frport from the Beacon + * @new: newly-parsed FCoE rport from the Beacon * * Called with ctlr_mutex held. 
*/ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, - struct fc_rport_priv *new) + struct fcoe_rport *new) { struct fc_lport *lport = fip->lp; struct fc_rport_priv *rdata; struct fcoe_rport *frport; - frport = fcoe_ctlr_rport(new); - if (frport->flags & FIP_FL_REC_OR_P2P) { + if (new->flags & FIP_FL_REC_OR_P2P) { LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n"); fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); return; } - rdata = fc_rport_lookup(lport, new->ids.port_id); + rdata = fc_rport_lookup(lport, new->rdata.ids.port_id); if (rdata) { - if (rdata->ids.node_name == new->ids.node_name && - rdata->ids.port_name == new->ids.port_name) { + if (rdata->ids.node_name == new->rdata.ids.node_name && + rdata->ids.port_name == new->rdata.ids.port_name) { frport = fcoe_ctlr_rport(rdata); + LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n", rdata->ids.port_id); if (!frport->time && fip->state == FIP_ST_VNMP_UP) { @@ -2678,7 +2679,7 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, * Don't add the neighbor yet. */ LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n", - new->ids.port_id); + new->rdata.ids.port_id); if (time_after(jiffies, fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT))) fcoe_ctlr_vn_send_claim(fip); @@ -2738,10 +2739,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct fip_header *fiph; enum fip_vn2vn_subcode sub; - struct { - struct fc_rport_priv rdata; - struct fcoe_rport frport; - } buf; + struct fcoe_rport frport = { }; int rc, vlan_id = 0; fiph = (struct fip_header *)skb->data; @@ -2757,7 +2755,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) goto drop; } - rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata); + rc = fcoe_ctlr_vn_parse(fip, skb, &frport); if (rc) { LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc); goto drop; @@ -2766,19 +2764,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) mutex_lock(&fip->ctlr_mutex); switch (sub) { case FIP_SC_VN_PROBE_REQ: - fcoe_ctlr_vn_probe_req(fip, &buf.rdata); + fcoe_ctlr_vn_probe_req(fip, &frport); break; case FIP_SC_VN_PROBE_REP: - fcoe_ctlr_vn_probe_reply(fip, &buf.rdata); + fcoe_ctlr_vn_probe_reply(fip, &frport); break; case FIP_SC_VN_CLAIM_NOTIFY: - fcoe_ctlr_vn_claim_notify(fip, &buf.rdata); + fcoe_ctlr_vn_claim_notify(fip, &frport); break; case FIP_SC_VN_CLAIM_REP: - fcoe_ctlr_vn_claim_resp(fip, &buf.rdata); + fcoe_ctlr_vn_claim_resp(fip, &frport); break; case FIP_SC_VN_BEACON: - fcoe_ctlr_vn_beacon(fip, &buf.rdata); + fcoe_ctlr_vn_beacon(fip, &frport); break; default: LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub); @@ -2802,22 +2800,18 @@ drop: */ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip, struct sk_buff *skb, - struct fc_rport_priv *rdata) + struct fcoe_rport *frport) { struct fip_header *fiph; struct fip_desc *desc = NULL; struct fip_mac_desc *macd = NULL; struct fip_wwn_desc *wwn = NULL; - struct fcoe_rport *frport; size_t rlen; size_t dlen; u32 desc_mask = 0; u32 dtype; u8 sub; - memset(rdata, 0, sizeof(*rdata) + sizeof(*frport)); - frport = fcoe_ctlr_rport(rdata); - fiph = (struct fip_header *)skb->data; frport->flags = ntohs(fiph->fip_flags); @@ -2871,7 +2865,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip, if (dlen != sizeof(struct fip_wwn_desc)) goto len_err; wwn = (struct fip_wwn_desc *)desc; - rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); + frport->rdata.ids.node_name = + get_unaligned_be64(&wwn->fd_wwn); break; default: 
LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " @@ -2957,13 +2952,13 @@ static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip, /** * fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification. * @fip: The FCoE controller + * @frport: The newly-parsed FCoE rport from the Discovery Request * * Called with ctlr_mutex held. */ static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip, - struct fc_rport_priv *rdata) + struct fcoe_rport *frport) { - struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); enum fip_vlan_subcode sub = FIP_SC_VL_NOTE; if (fip->mode == FIP_MODE_VN2VN) @@ -2982,22 +2977,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct fip_header *fiph; enum fip_vlan_subcode sub; - struct { - struct fc_rport_priv rdata; - struct fcoe_rport frport; - } buf; + struct fcoe_rport frport = { }; int rc; fiph = (struct fip_header *)skb->data; sub = fiph->fip_subcode; - rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata); + rc = fcoe_ctlr_vlan_parse(fip, skb, &frport); if (rc) { LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc); goto drop; } mutex_lock(&fip->ctlr_mutex); if (sub == FIP_SC_VL_REQ) - fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata); + fcoe_ctlr_vlan_disc_reply(fip, &frport); mutex_unlock(&fip->ctlr_mutex); drop: diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 43a6b5350775..1bb6aada93fa 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -2334,6 +2334,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, case IOACCEL2_SERV_RESPONSE_COMPLETE: switch (c2->error_data.status) { case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: + if (cmd) + cmd->result = 0; break; case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: cmd->result |= SAM_STAT_CHECK_CONDITION; @@ -2483,8 +2485,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h, /* check for good status */ if (likely(c2->error_data.serv_response == 0 && - c2->error_data.status == 0)) + c2->error_data.status == 0)) { + cmd->result = 0; return hpsa_cmd_free_and_done(h, c, cmd); + } /* * Any RAID offload error results in retry which will use @@ -5654,6 +5658,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) return SCSI_MLQUEUE_DEVICE_BUSY; /* + * This is necessary because the SML doesn't zero out this field during + * error recovery. + */ + cmd->result = 0; + + /* * Call alternate submit routine for I/O accelerated commands. * Retries always go down the normal I/O path. 
*/ @@ -6081,8 +6091,6 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, if (idx != h->last_collision_tag) { /* Print once per tag */ dev_warn(&h->pdev->dev, "%s: tag collision (tag=%d)\n", __func__, idx); - if (c->scsi_cmd != NULL) - scsi_print_command(c->scsi_cmd); if (scmd) scsi_print_command(scmd); h->last_collision_tag = idx; @@ -7798,7 +7806,7 @@ static void hpsa_free_pci_init(struct ctlr_info *h) hpsa_disable_interrupt_mode(h); /* pci_init 2 */ /* * call pci_disable_device before pci_release_regions per - * Documentation/PCI/pci.rst + * Documentation/driver-api/pci/pci.rst */ pci_disable_device(h->pdev); /* pci_init 1 */ pci_release_regions(h->pdev); /* pci_init 2 */ @@ -7881,7 +7889,7 @@ clean2: /* intmode+region, pci */ clean1: /* * call pci_disable_device before pci_release_regions per - * Documentation/PCI/pci.rst + * Documentation/driver-api/pci/pci.rst */ pci_disable_device(h->pdev); pci_release_regions(h->pdev); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index acd16e0d52cf..8cdbac076a1b 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -4864,8 +4864,8 @@ static int ibmvfc_remove(struct vio_dev *vdev) spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_purge_requests(vhost, DID_ERROR); - ibmvfc_free_event_pool(vhost); spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_free_event_pool(vhost); ibmvfc_free_mem(vhost); spin_lock(&ibmvfc_driver_lock); diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index e0f3852fdad1..da6e97d8dc3b 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -128,6 +128,7 @@ EXPORT_SYMBOL(fc_rport_lookup); struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) { struct fc_rport_priv *rdata; + size_t rport_priv_size = sizeof(*rdata); lockdep_assert_held(&lport->disc.disc_mutex); @@ -135,7 +136,9 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) if (rdata) return rdata; - rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL); + if (lport->rport_priv_size > 0) + rport_priv_size = lport->rport_priv_size; + rdata = kzalloc(rport_priv_size, GFP_KERNEL); if (!rdata) return NULL; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index b2339d04a700..f9f07935556e 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3163,6 +3163,7 @@ fw_crash_buffer_show(struct device *cdev, (struct megasas_instance *) shost->hostdata; u32 size; unsigned long dmachunk = CRASH_DMA_BUF_SIZE; + unsigned long chunk_left_bytes; unsigned long src_addr; unsigned long flags; u32 buff_offset; @@ -3186,6 +3187,8 @@ fw_crash_buffer_show(struct device *cdev, } size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; + chunk_left_bytes = dmachunk - (buff_offset % dmachunk); + size = (size > chunk_left_bytes) ? chunk_left_bytes : size; size = (size >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : size; src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + @@ -8763,7 +8766,7 @@ static int __init megasas_init(void) if ((event_log_level < MFI_EVT_CLASS_DEBUG) || (event_log_level > MFI_EVT_CLASS_DEAD)) { - printk(KERN_WARNING "megarid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); + pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); event_log_level = MFI_EVT_CLASS_CRITICAL; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index a32b3f0fcd15..120e3c4de8c2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -537,7 +537,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) return 0; } -int +static int megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) { u32 max_mpt_cmd, i, j; @@ -576,7 +576,8 @@ megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) return 0; } -int + +static int megasas_alloc_request_fusion(struct megasas_instance *instance) { struct fusion_context *fusion; @@ -657,7 +658,7 @@ retry_alloc: return 0; } -int +static int megasas_alloc_reply_fusion(struct megasas_instance *instance) { int i, count; @@ -734,7 +735,7 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance) return 0; } -int +static int megasas_alloc_rdpq_fusion(struct megasas_instance *instance) { int i, j, k, msix_count; @@ -916,7 +917,7 @@ megasas_free_reply_fusion(struct megasas_instance *instance) { * and is used as SMID of the cmd. * SMID value range is from 1 to max_fw_cmds. */ -int +static int megasas_alloc_cmds_fusion(struct megasas_instance *instance) { int i; @@ -1736,7 +1737,7 @@ static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance) * * This is the main function for initializing firmware. 
*/ -u32 +static u32 megasas_init_adapter_fusion(struct megasas_instance *instance) { struct fusion_context *fusion; @@ -1962,7 +1963,7 @@ megasas_fusion_stop_watchdog(struct megasas_instance *instance) * @ext_status : ext status of cmd returned by FW */ -void +static void map_cmd_status(struct fusion_context *fusion, struct scsi_cmnd *scmd, u8 status, u8 ext_status, u32 data_length, u8 *sense) @@ -2375,7 +2376,7 @@ int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp, * * Used to set the PD LBA in CDB for FP IOs */ -void +static void megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp, struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) @@ -2714,7 +2715,7 @@ megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion, * Prepares the io_request and chain elements (sg_frame) for IO * The IO can be for PD (Fast Path) or LD */ -void +static void megasas_build_ldio_fusion(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd_fusion *cmd) @@ -3211,7 +3212,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, * Invokes helper functions to prepare request frames * and sets flags appropriate for IO/Non-IO cmd */ -int +static int megasas_build_io_fusion(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd_fusion *cmd) @@ -3325,9 +3326,9 @@ megasas_get_request_descriptor(struct megasas_instance *instance, u16 index) /* megasas_prepate_secondRaid1_IO * It prepares the raid 1 second IO */ -void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance, - struct megasas_cmd_fusion *cmd, - struct megasas_cmd_fusion *r1_cmd) +static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance, + struct megasas_cmd_fusion *cmd, + struct megasas_cmd_fusion *r1_cmd) { union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL; struct fusion_context *fusion; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 684662888792..050c0f029ef9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2703,6 +2703,8 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) { u64 required_mask, coherent_mask; struct sysinfo s; + /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ + int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 
63 : 64; if (ioc->is_mcpu_endpoint) goto try_32bit; @@ -2712,17 +2714,17 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) goto try_32bit; if (ioc->dma_mask) - coherent_mask = DMA_BIT_MASK(64); + coherent_mask = DMA_BIT_MASK(dma_mask); else coherent_mask = DMA_BIT_MASK(32); - if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) || + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) || dma_set_coherent_mask(&pdev->dev, coherent_mask)) goto try_32bit; ioc->base_add_sg_single = &_base_add_sg_single_64; ioc->sge_size = sizeof(Mpi2SGESimple64_t); - ioc->dma_mask = 64; + ioc->dma_mask = dma_mask; goto out; try_32bit: @@ -2744,7 +2746,7 @@ static int _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) { - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) { if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) return -ENODEV; } @@ -4989,7 +4991,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) total_sz += sz; } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count)); - if (ioc->dma_mask == 64) { + if (ioc->dma_mask > 32) { if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) { ioc_warn(ioc, "no suitable consistent DMA mask for %s\n", pci_name(ioc->pdev)); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 4059655639d9..da83034d4759 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -4877,7 +4877,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) ql_log(ql_log_warn, vha, 0xd049, "Failed to allocate ct_sns request.\n"); kfree(fcport); - fcport = NULL; + return NULL; } INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9381171c2fc0..11e64b50497f 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1784,8 +1784,10 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); } - shost->max_sectors = min_t(unsigned int, shost->max_sectors, - dma_max_mapping_size(dev) << SECTOR_SHIFT); + if (dev->dma_mask) { + shost->max_sectors = min_t(unsigned int, shost->max_sectors, + dma_max_mapping_size(dev) >> SECTOR_SHIFT); + } blk_queue_max_hw_sectors(q, shost->max_sectors); if (shost->unchecked_isa_dma) blk_queue_bounce_limit(q, BLK_BOUNCE_ISA); diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index 62c6ba17991a..c9519e62308c 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c @@ -419,7 +419,7 @@ static void qe_upload_microcode(const void *base, /* * Upload a microcode to the I-RAM at a specific address. * - * See Documentation/powerpc/qe_firmware.txt for information on QE microcode + * See Documentation/powerpc/qe_firmware.rst for information on QE microcode * uploading. 
* * Currently, only version 1 is supported, so the 'version' field must be diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c index 22dd4c457d6a..c70caf4ea490 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c @@ -875,10 +875,12 @@ static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port) return 0; if (caps & DCB_CAP_DCBX_VER_IEEE) { - iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY; - + iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM; ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); - + if (!ret) { + iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY; + ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); + } } else if (caps & DCB_CAP_DCBX_VER_CEE) { iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM; diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c index 343b129c2cfa..e877b917c15f 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_main.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c @@ -589,7 +589,8 @@ static void cxgbit_dcb_workfn(struct work_struct *work) iscsi_app = &dcb_work->dcb_app; if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) { - if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY) + if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) && + (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)) goto out; priority = iscsi_app->app.priority; diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c index 213ab3cc6b80..d3446acf9bbd 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c @@ -487,6 +487,7 @@ static int proc_thermal_rapl_add(struct pci_dev *pdev, rapl_mmio_cpu_online, rapl_mmio_cpu_down_prep); if (ret < 0) { powercap_unregister_control_type(rapl_mmio_priv.control_type); + rapl_mmio_priv.control_type = NULL; return ret; } rapl_mmio_priv.pcap_rapl_online = ret; @@ -496,6 +497,9 @@ static int proc_thermal_rapl_add(struct pci_dev *pdev, static void proc_thermal_rapl_remove(void) { + if (IS_ERR_OR_NULL(rapl_mmio_priv.control_type)) + return; + cpuhp_remove_state(rapl_mmio_priv.pcap_rapl_online); powercap_unregister_control_type(rapl_mmio_priv.control_type); } diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c index cb4db1b3ca3c..5fb214e67d73 100644 --- a/drivers/tty/hvc/hvcs.c +++ b/drivers/tty/hvc/hvcs.c @@ -47,7 +47,7 @@ * using the 2.6 Linux kernel kref construct. * * For direction on installation and usage of this driver please reference - * Documentation/powerpc/hvcs.txt. + * Documentation/powerpc/hvcs.rst. */ #include <linux/device.h> diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index fd385c8c53a5..3083dbae35f7 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1035,25 +1035,6 @@ config SERIAL_VT8500_CONSOLE depends on SERIAL_VT8500=y select SERIAL_CORE_CONSOLE -config SERIAL_NETX - tristate "NetX serial port support" - depends on ARCH_NETX - select SERIAL_CORE - help - If you have a machine based on a Hilscher NetX SoC you - can enable its onboard serial port by enabling this option. - - To compile this driver as a module, choose M here: the - module will be called netx-serial. 
- -config SERIAL_NETX_CONSOLE - bool "Console on NetX serial port" - depends on SERIAL_NETX=y - select SERIAL_CORE_CONSOLE - help - If you have enabled the serial port on the Hilscher NetX SoC - you can make it the console by answering Y to this option. - config SERIAL_OMAP tristate "OMAP serial port support" depends on ARCH_OMAP2PLUS diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index 7cd7cabfa6c4..15a0fccadf7e 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -59,7 +59,6 @@ obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o obj-$(CONFIG_SERIAL_MSM) += msm_serial.o obj-$(CONFIG_SERIAL_QCOM_GENI) += qcom_geni_serial.o -obj-$(CONFIG_SERIAL_NETX) += netx-serial.o obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c deleted file mode 100644 index b3556863491f..000000000000 --- a/drivers/tty/serial/netx-serial.c +++ /dev/null @@ -1,733 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -#if defined(CONFIG_SERIAL_NETX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) -#define SUPPORT_SYSRQ -#endif - -#include <linux/device.h> -#include <linux/module.h> -#include <linux/ioport.h> -#include <linux/init.h> -#include <linux/console.h> -#include <linux/sysrq.h> -#include <linux/platform_device.h> -#include <linux/tty.h> -#include <linux/tty_flip.h> -#include <linux/serial_core.h> -#include <linux/serial.h> - -#include <asm/io.h> -#include <asm/irq.h> -#include <mach/hardware.h> -#include <mach/netx-regs.h> - -/* We've been assigned a range on the "Low-density serial ports" major */ -#define SERIAL_NX_MAJOR 204 -#define MINOR_START 170 - -enum uart_regs { - UART_DR = 0x00, - UART_SR = 0x04, - UART_LINE_CR = 0x08, - UART_BAUDDIV_MSB = 0x0c, - UART_BAUDDIV_LSB = 0x10, - UART_CR = 0x14, - UART_FR = 0x18, - UART_IIR = 0x1c, - UART_ILPR = 0x20, - UART_RTS_CR = 0x24, - UART_RTS_LEAD = 0x28, - UART_RTS_TRAIL = 0x2c, - UART_DRV_ENABLE = 0x30, - UART_BRM_CR = 0x34, - UART_RXFIFO_IRQLEVEL = 0x38, - UART_TXFIFO_IRQLEVEL = 0x3c, -}; - -#define SR_FE (1<<0) -#define SR_PE (1<<1) -#define SR_BE (1<<2) -#define SR_OE (1<<3) - -#define LINE_CR_BRK (1<<0) -#define LINE_CR_PEN (1<<1) -#define LINE_CR_EPS (1<<2) -#define LINE_CR_STP2 (1<<3) -#define LINE_CR_FEN (1<<4) -#define LINE_CR_5BIT (0<<5) -#define LINE_CR_6BIT (1<<5) -#define LINE_CR_7BIT (2<<5) -#define LINE_CR_8BIT (3<<5) -#define LINE_CR_BITS_MASK (3<<5) - -#define CR_UART_EN (1<<0) -#define CR_SIREN (1<<1) -#define CR_SIRLP (1<<2) -#define CR_MSIE (1<<3) -#define CR_RIE (1<<4) -#define CR_TIE (1<<5) -#define CR_RTIE (1<<6) -#define CR_LBE (1<<7) - -#define FR_CTS (1<<0) -#define FR_DSR (1<<1) -#define FR_DCD (1<<2) -#define FR_BUSY (1<<3) -#define FR_RXFE (1<<4) -#define FR_TXFF (1<<5) -#define FR_RXFF (1<<6) -#define FR_TXFE (1<<7) - -#define IIR_MIS (1<<0) -#define IIR_RIS (1<<1) -#define IIR_TIS (1<<2) -#define IIR_RTIS (1<<3) -#define IIR_MASK 0xf - -#define RTS_CR_AUTO (1<<0) -#define RTS_CR_RTS (1<<1) -#define RTS_CR_COUNT (1<<2) -#define RTS_CR_MOD2 (1<<3) -#define RTS_CR_RTS_POL (1<<4) -#define RTS_CR_CTS_CTR (1<<5) -#define RTS_CR_CTS_POL (1<<6) -#define RTS_CR_STICK (1<<7) - -#define UART_PORT_SIZE 0x40 -#define DRIVER_NAME "netx-uart" - -struct netx_port { - struct uart_port port; -}; - -static void netx_stop_tx(struct 
uart_port *port) -{ - unsigned int val; - val = readl(port->membase + UART_CR); - writel(val & ~CR_TIE, port->membase + UART_CR); -} - -static void netx_stop_rx(struct uart_port *port) -{ - unsigned int val; - val = readl(port->membase + UART_CR); - writel(val & ~CR_RIE, port->membase + UART_CR); -} - -static void netx_enable_ms(struct uart_port *port) -{ - unsigned int val; - val = readl(port->membase + UART_CR); - writel(val | CR_MSIE, port->membase + UART_CR); -} - -static inline void netx_transmit_buffer(struct uart_port *port) -{ - struct circ_buf *xmit = &port->state->xmit; - - if (port->x_char) { - writel(port->x_char, port->membase + UART_DR); - port->icount.tx++; - port->x_char = 0; - return; - } - - if (uart_tx_stopped(port) || uart_circ_empty(xmit)) { - netx_stop_tx(port); - return; - } - - do { - /* send xmit->buf[xmit->tail] - * out the port here */ - writel(xmit->buf[xmit->tail], port->membase + UART_DR); - xmit->tail = (xmit->tail + 1) & - (UART_XMIT_SIZE - 1); - port->icount.tx++; - if (uart_circ_empty(xmit)) - break; - } while (!(readl(port->membase + UART_FR) & FR_TXFF)); - - if (uart_circ_empty(xmit)) - netx_stop_tx(port); -} - -static void netx_start_tx(struct uart_port *port) -{ - writel( - readl(port->membase + UART_CR) | CR_TIE, port->membase + UART_CR); - - if (!(readl(port->membase + UART_FR) & FR_TXFF)) - netx_transmit_buffer(port); -} - -static unsigned int netx_tx_empty(struct uart_port *port) -{ - return readl(port->membase + UART_FR) & FR_BUSY ? 0 : TIOCSER_TEMT; -} - -static void netx_txint(struct uart_port *port) -{ - struct circ_buf *xmit = &port->state->xmit; - - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { - netx_stop_tx(port); - return; - } - - netx_transmit_buffer(port); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); -} - -static void netx_rxint(struct uart_port *port, unsigned long *flags) -{ - unsigned char rx, flg, status; - - while (!(readl(port->membase + UART_FR) & FR_RXFE)) { - rx = readl(port->membase + UART_DR); - flg = TTY_NORMAL; - port->icount.rx++; - status = readl(port->membase + UART_SR); - if (status & SR_BE) { - writel(0, port->membase + UART_SR); - if (uart_handle_break(port)) - continue; - } - - if (unlikely(status & (SR_FE | SR_PE | SR_OE))) { - - if (status & SR_PE) - port->icount.parity++; - else if (status & SR_FE) - port->icount.frame++; - if (status & SR_OE) - port->icount.overrun++; - - status &= port->read_status_mask; - - if (status & SR_BE) - flg = TTY_BREAK; - else if (status & SR_PE) - flg = TTY_PARITY; - else if (status & SR_FE) - flg = TTY_FRAME; - } - - if (uart_handle_sysrq_char(port, rx)) - continue; - - uart_insert_char(port, status, SR_OE, rx, flg); - } - - spin_unlock_irqrestore(&port->lock, *flags); - tty_flip_buffer_push(&port->state->port); - spin_lock_irqsave(&port->lock, *flags); -} - -static irqreturn_t netx_int(int irq, void *dev_id) -{ - struct uart_port *port = dev_id; - unsigned long flags; - unsigned char status; - - spin_lock_irqsave(&port->lock,flags); - - status = readl(port->membase + UART_IIR) & IIR_MASK; - while (status) { - if (status & IIR_RIS) - netx_rxint(port, &flags); - if (status & IIR_TIS) - netx_txint(port); - if (status & IIR_MIS) { - if (readl(port->membase + UART_FR) & FR_CTS) - uart_handle_cts_change(port, 1); - else - uart_handle_cts_change(port, 0); - } - writel(0, port->membase + UART_IIR); - status = readl(port->membase + UART_IIR) & IIR_MASK; - } - - spin_unlock_irqrestore(&port->lock,flags); - return IRQ_HANDLED; -} - -static unsigned 
int netx_get_mctrl(struct uart_port *port) -{ - unsigned int ret = TIOCM_DSR | TIOCM_CAR; - - if (readl(port->membase + UART_FR) & FR_CTS) - ret |= TIOCM_CTS; - - return ret; -} - -static void netx_set_mctrl(struct uart_port *port, unsigned int mctrl) -{ - unsigned int val; - - /* FIXME: Locking needed ? */ - if (mctrl & TIOCM_RTS) { - val = readl(port->membase + UART_RTS_CR); - writel(val | RTS_CR_RTS, port->membase + UART_RTS_CR); - } -} - -static void netx_break_ctl(struct uart_port *port, int break_state) -{ - unsigned int line_cr; - spin_lock_irq(&port->lock); - - line_cr = readl(port->membase + UART_LINE_CR); - if (break_state != 0) - line_cr |= LINE_CR_BRK; - else - line_cr &= ~LINE_CR_BRK; - writel(line_cr, port->membase + UART_LINE_CR); - - spin_unlock_irq(&port->lock); -} - -static int netx_startup(struct uart_port *port) -{ - int ret; - - ret = request_irq(port->irq, netx_int, 0, - DRIVER_NAME, port); - if (ret) { - dev_err(port->dev, "unable to grab irq%d\n",port->irq); - goto exit; - } - - writel(readl(port->membase + UART_LINE_CR) | LINE_CR_FEN, - port->membase + UART_LINE_CR); - - writel(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE | CR_UART_EN, - port->membase + UART_CR); - -exit: - return ret; -} - -static void netx_shutdown(struct uart_port *port) -{ - writel(0, port->membase + UART_CR) ; - - free_irq(port->irq, port); -} - -static void -netx_set_termios(struct uart_port *port, struct ktermios *termios, - struct ktermios *old) -{ - unsigned int baud, quot; - unsigned char old_cr; - unsigned char line_cr = LINE_CR_FEN; - unsigned char rts_cr = 0; - - switch (termios->c_cflag & CSIZE) { - case CS5: - line_cr |= LINE_CR_5BIT; - break; - case CS6: - line_cr |= LINE_CR_6BIT; - break; - case CS7: - line_cr |= LINE_CR_7BIT; - break; - case CS8: - line_cr |= LINE_CR_8BIT; - break; - } - - if (termios->c_cflag & CSTOPB) - line_cr |= LINE_CR_STP2; - - if (termios->c_cflag & PARENB) { - line_cr |= LINE_CR_PEN; - if (!(termios->c_cflag & PARODD)) - line_cr |= LINE_CR_EPS; - } - - if (termios->c_cflag & CRTSCTS) - rts_cr = RTS_CR_AUTO | RTS_CR_CTS_CTR | RTS_CR_RTS_POL; - - baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); - quot = baud * 4096; - quot /= 1000; - quot *= 256; - quot /= 100000; - - spin_lock_irq(&port->lock); - - uart_update_timeout(port, termios->c_cflag, baud); - - old_cr = readl(port->membase + UART_CR); - - /* disable interrupts */ - writel(old_cr & ~(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE), - port->membase + UART_CR); - - /* drain transmitter */ - while (readl(port->membase + UART_FR) & FR_BUSY); - - /* disable UART */ - writel(old_cr & ~CR_UART_EN, port->membase + UART_CR); - - /* modem status interrupts */ - old_cr &= ~CR_MSIE; - if (UART_ENABLE_MS(port, termios->c_cflag)) - old_cr |= CR_MSIE; - - writel((quot>>8) & 0xff, port->membase + UART_BAUDDIV_MSB); - writel(quot & 0xff, port->membase + UART_BAUDDIV_LSB); - writel(line_cr, port->membase + UART_LINE_CR); - - writel(rts_cr, port->membase + UART_RTS_CR); - - /* - * Characters to ignore - */ - port->ignore_status_mask = 0; - if (termios->c_iflag & IGNPAR) - port->ignore_status_mask |= SR_PE; - if (termios->c_iflag & IGNBRK) { - port->ignore_status_mask |= SR_BE; - /* - * If we're ignoring parity and break indicators, - * ignore overruns too (for real raw support). 
- */ - if (termios->c_iflag & IGNPAR) - port->ignore_status_mask |= SR_PE; - } - - port->read_status_mask = 0; - if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) - port->read_status_mask |= SR_BE; - if (termios->c_iflag & INPCK) - port->read_status_mask |= SR_PE | SR_FE; - - writel(old_cr, port->membase + UART_CR); - - spin_unlock_irq(&port->lock); -} - -static const char *netx_type(struct uart_port *port) -{ - return port->type == PORT_NETX ? "NETX" : NULL; -} - -static void netx_release_port(struct uart_port *port) -{ - release_mem_region(port->mapbase, UART_PORT_SIZE); -} - -static int netx_request_port(struct uart_port *port) -{ - return request_mem_region(port->mapbase, UART_PORT_SIZE, - DRIVER_NAME) != NULL ? 0 : -EBUSY; -} - -static void netx_config_port(struct uart_port *port, int flags) -{ - if (flags & UART_CONFIG_TYPE && netx_request_port(port) == 0) - port->type = PORT_NETX; -} - -static int -netx_verify_port(struct uart_port *port, struct serial_struct *ser) -{ - int ret = 0; - - if (ser->type != PORT_UNKNOWN && ser->type != PORT_NETX) - ret = -EINVAL; - - return ret; -} - -static struct uart_ops netx_pops = { - .tx_empty = netx_tx_empty, - .set_mctrl = netx_set_mctrl, - .get_mctrl = netx_get_mctrl, - .stop_tx = netx_stop_tx, - .start_tx = netx_start_tx, - .stop_rx = netx_stop_rx, - .enable_ms = netx_enable_ms, - .break_ctl = netx_break_ctl, - .startup = netx_startup, - .shutdown = netx_shutdown, - .set_termios = netx_set_termios, - .type = netx_type, - .release_port = netx_release_port, - .request_port = netx_request_port, - .config_port = netx_config_port, - .verify_port = netx_verify_port, -}; - -static struct netx_port netx_ports[] = { - { - .port = { - .type = PORT_NETX, - .iotype = UPIO_MEM, - .membase = (char __iomem *)io_p2v(NETX_PA_UART0), - .mapbase = NETX_PA_UART0, - .irq = NETX_IRQ_UART0, - .uartclk = 100000000, - .fifosize = 16, - .flags = UPF_BOOT_AUTOCONF, - .ops = &netx_pops, - .line = 0, - }, - }, { - .port = { - .type = PORT_NETX, - .iotype = UPIO_MEM, - .membase = (char __iomem *)io_p2v(NETX_PA_UART1), - .mapbase = NETX_PA_UART1, - .irq = NETX_IRQ_UART1, - .uartclk = 100000000, - .fifosize = 16, - .flags = UPF_BOOT_AUTOCONF, - .ops = &netx_pops, - .line = 1, - }, - }, { - .port = { - .type = PORT_NETX, - .iotype = UPIO_MEM, - .membase = (char __iomem *)io_p2v(NETX_PA_UART2), - .mapbase = NETX_PA_UART2, - .irq = NETX_IRQ_UART2, - .uartclk = 100000000, - .fifosize = 16, - .flags = UPF_BOOT_AUTOCONF, - .ops = &netx_pops, - .line = 2, - }, - } -}; - -#ifdef CONFIG_SERIAL_NETX_CONSOLE - -static void netx_console_putchar(struct uart_port *port, int ch) -{ - while (readl(port->membase + UART_FR) & FR_BUSY); - writel(ch, port->membase + UART_DR); -} - -static void -netx_console_write(struct console *co, const char *s, unsigned int count) -{ - struct uart_port *port = &netx_ports[co->index].port; - unsigned char cr_save; - - cr_save = readl(port->membase + UART_CR); - writel(cr_save | CR_UART_EN, port->membase + UART_CR); - - uart_console_write(port, s, count, netx_console_putchar); - - while (readl(port->membase + UART_FR) & FR_BUSY); - writel(cr_save, port->membase + UART_CR); -} - -static void __init -netx_console_get_options(struct uart_port *port, int *baud, - int *parity, int *bits, int *flow) -{ - unsigned char line_cr; - - *baud = (readl(port->membase + UART_BAUDDIV_MSB) << 8) | - readl(port->membase + UART_BAUDDIV_LSB); - *baud *= 1000; - *baud /= 4096; - *baud *= 1000; - *baud /= 256; - *baud *= 100; - - line_cr = readl(port->membase + UART_LINE_CR); - 
*parity = 'n'; - if (line_cr & LINE_CR_PEN) { - if (line_cr & LINE_CR_EPS) - *parity = 'e'; - else - *parity = 'o'; - } - - switch (line_cr & LINE_CR_BITS_MASK) { - case LINE_CR_8BIT: - *bits = 8; - break; - case LINE_CR_7BIT: - *bits = 7; - break; - case LINE_CR_6BIT: - *bits = 6; - break; - case LINE_CR_5BIT: - *bits = 5; - break; - } - - if (readl(port->membase + UART_RTS_CR) & RTS_CR_AUTO) - *flow = 'r'; -} - -static int __init -netx_console_setup(struct console *co, char *options) -{ - struct netx_port *sport; - int baud = 9600; - int bits = 8; - int parity = 'n'; - int flow = 'n'; - - /* - * Check whether an invalid uart number has been specified, and - * if so, search for the first available port that does have - * console support. - */ - if (co->index == -1 || co->index >= ARRAY_SIZE(netx_ports)) - co->index = 0; - sport = &netx_ports[co->index]; - - if (options) { - uart_parse_options(options, &baud, &parity, &bits, &flow); - } else { - /* if the UART is enabled, assume it has been correctly setup - * by the bootloader and get the options - */ - if (readl(sport->port.membase + UART_CR) & CR_UART_EN) { - netx_console_get_options(&sport->port, &baud, - &parity, &bits, &flow); - } - - } - - return uart_set_options(&sport->port, co, baud, parity, bits, flow); -} - -static struct uart_driver netx_reg; -static struct console netx_console = { - .name = "ttyNX", - .write = netx_console_write, - .device = uart_console_device, - .setup = netx_console_setup, - .flags = CON_PRINTBUFFER, - .index = -1, - .data = &netx_reg, -}; - -static int __init netx_console_init(void) -{ - register_console(&netx_console); - return 0; -} -console_initcall(netx_console_init); - -#define NETX_CONSOLE &netx_console -#else -#define NETX_CONSOLE NULL -#endif - -static struct uart_driver netx_reg = { - .owner = THIS_MODULE, - .driver_name = DRIVER_NAME, - .dev_name = "ttyNX", - .major = SERIAL_NX_MAJOR, - .minor = MINOR_START, - .nr = ARRAY_SIZE(netx_ports), - .cons = NETX_CONSOLE, -}; - -static int serial_netx_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct netx_port *sport = platform_get_drvdata(pdev); - - if (sport) - uart_suspend_port(&netx_reg, &sport->port); - - return 0; -} - -static int serial_netx_resume(struct platform_device *pdev) -{ - struct netx_port *sport = platform_get_drvdata(pdev); - - if (sport) - uart_resume_port(&netx_reg, &sport->port); - - return 0; -} - -static int serial_netx_probe(struct platform_device *pdev) -{ - struct uart_port *port = &netx_ports[pdev->id].port; - - dev_info(&pdev->dev, "initialising\n"); - - port->dev = &pdev->dev; - - writel(1, port->membase + UART_RXFIFO_IRQLEVEL); - uart_add_one_port(&netx_reg, &netx_ports[pdev->id].port); - platform_set_drvdata(pdev, &netx_ports[pdev->id]); - - return 0; -} - -static int serial_netx_remove(struct platform_device *pdev) -{ - struct netx_port *sport = platform_get_drvdata(pdev); - - if (sport) - uart_remove_one_port(&netx_reg, &sport->port); - - return 0; -} - -static struct platform_driver serial_netx_driver = { - .probe = serial_netx_probe, - .remove = serial_netx_remove, - - .suspend = serial_netx_suspend, - .resume = serial_netx_resume, - - .driver = { - .name = DRIVER_NAME, - }, -}; - -static int __init netx_serial_init(void) -{ - int ret; - - printk(KERN_INFO "Serial: NetX driver\n"); - - ret = uart_register_driver(&netx_reg); - if (ret) - return ret; - - ret = platform_driver_register(&serial_netx_driver); - if (ret != 0) - uart_unregister_driver(&netx_reg); - - return 0; -} - -static void __exit 
netx_serial_exit(void) -{ - platform_driver_unregister(&serial_netx_driver); - uart_unregister_driver(&netx_reg); -} - -module_init(netx_serial_init); -module_exit(netx_serial_exit); - -MODULE_AUTHOR("Sascha Hauer"); -MODULE_DESCRIPTION("NetX serial port driver"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c index 717292c1c0df..60ff236a3d63 100644 --- a/drivers/tty/tty_ldsem.c +++ b/drivers/tty/tty_ldsem.c @@ -93,8 +93,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem) list_for_each_entry_safe(waiter, next, &sem->read_wait, list) { tsk = waiter->task; - smp_mb(); - waiter->task = NULL; + smp_store_release(&waiter->task, NULL); wake_up_process(tsk); put_task_struct(tsk); } @@ -194,7 +193,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout) for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); - if (!waiter.task) + if (!smp_load_acquire(&waiter.task)) break; if (!timeout) break; diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index ec92f36ab5c4..34aa39d1aed9 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -3771,7 +3771,11 @@ static ssize_t show_bind(struct device *dev, struct device_attribute *attr, char *buf) { struct con_driver *con = dev_get_drvdata(dev); - int bind = con_is_bound(con->con); + int bind; + + console_lock(); + bind = con_is_bound(con->con); + console_unlock(); return snprintf(buf, PAGE_SIZE, "%i\n", bind); } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 88533938ce19..9320787ac2e6 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -3052,8 +3052,8 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, local_mem = devm_memremap(hcd->self.sysdev, phys_addr, size, MEMREMAP_WC); - if (!local_mem) - return -ENOMEM; + if (IS_ERR(local_mem)) + return PTR_ERR(local_mem); /* * Here we pass a dma_addr_t but the arg type is a phys_addr_t. 
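A note on the usb_hcd_setup_local_mem() hunk just above: devm_memremap() reports failure through an ERR_PTR()-encoded pointer, never NULL, so the old "if (!local_mem)" test could not observe an error and the function would continue with a poisoned pointer. A minimal sketch of the corrected pattern follows; the map_local_mem() helper is hypothetical, for illustration only.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>

/* Hypothetical helper: devm_memremap() never returns NULL on failure,
 * it returns an ERR_PTR() that must be unwrapped with IS_ERR() and
 * PTR_ERR() before the mapping is used.
 */
static int map_local_mem(struct device *dev, phys_addr_t phys, size_t size)
{
	void *mem = devm_memremap(dev, phys, size, MEMREMAP_WC);

	if (IS_ERR(mem))
		return PTR_ERR(mem);	/* e.g. -ENOMEM; a !mem test misses this */

	/* ... hand mem to the local DMA pool here ... */
	return 0;
}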
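Likewise, the tty_ldsem change further above swaps a bare smp_mb() plus plain store for an smp_store_release()/smp_load_acquire() pair: the release store on waiter->task guarantees that everything the waker wrote beforehand is visible to a sleeper that observes the NULL, and the acquire load in down_read_failed() supplies the matching ordering. A condensed sketch of the handoff, with a simplified waiter carrying just the fields the handoff touches:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <asm/barrier.h>

struct example_waiter {
	struct list_head list;
	struct task_struct *task;
};

/* Waker: the release store orders all prior writes before the NULL
 * becomes visible, so the woken task may safely proceed and the
 * waiter's stack frame may be reused.
 */
static void example_wake_one(struct example_waiter *waiter)
{
	struct task_struct *tsk = waiter->task;

	smp_store_release(&waiter->task, NULL);
	wake_up_process(tsk);
	put_task_struct(tsk);
}

/* Sleeper: the acquire load pairs with the release store above. */
static bool example_handoff_done(struct example_waiter *waiter)
{
	return !smp_load_acquire(&waiter->task);
}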
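One more note, on the DMA-size clamping theme that runs through this series: dma_max_mapping_size() returns bytes, so converting the limit to 512-byte sectors needs a right shift by SECTOR_SHIFT. The scsi_lib.c hunk earlier fixes exactly that (the previous left shift made the byte limit 512 times too large, so the min_t() never actually clamped), and it also guards the clamp because PIO-only hosts have no dma_mask. The usb-storage scsiglue.c hunk below applies the same conversion to max_hw_sectors. A sketch of the pattern, under those assumptions; clamp_by_dma_limit() is a made-up name for illustration:

#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>

/* Illustrative only: bound a sector count by the largest single DMA
 * mapping the device supports. dma_max_mapping_size() is in bytes;
 * >> SECTOR_SHIFT converts to 512-byte sectors. Hosts that do PIO
 * carry a NULL dma_mask, so the clamp is skipped for them.
 */
static unsigned int clamp_by_dma_limit(struct device *dev,
				       unsigned int max_sectors)
{
	if (!dev->dma_mask)
		return max_sectors;

	return min_t(unsigned int, max_sectors,
		     dma_max_mapping_size(dev) >> SECTOR_SHIFT);
}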
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index fe9422d3bcdc..b0882c13a1d1 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -149,7 +149,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd) break; case PCI_VENDOR_ID_AMD: /* AMD PLL quirk */ - if (usb_amd_find_chipset_info()) + if (usb_amd_quirk_pll_check()) ehci->amd_pll_fix = 1; /* AMD8111 EHCI doesn't work, according to AMD errata */ if (pdev->device == 0x7463) { @@ -186,7 +186,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd) break; case PCI_VENDOR_ID_ATI: /* AMD PLL quirk */ - if (usb_amd_find_chipset_info()) + if (usb_amd_quirk_pll_check()) ehci->amd_pll_fix = 1; /* diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c index 09a8ebd95588..6968b9f2b76b 100644 --- a/drivers/usb/host/hwa-hc.c +++ b/drivers/usb/host/hwa-hc.c @@ -159,7 +159,7 @@ out: return result; error_set_cluster_id: - wusb_cluster_id_put(wusbhc->cluster_id); + wusb_cluster_id_put(addr); error_cluster_id_get: goto out; diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index a033f7d855e0..f4e13a3fddee 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -152,7 +152,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); - if (usb_amd_find_chipset_info()) + if (usb_amd_quirk_pll_check()) ohci->flags |= OHCI_QUIRK_AMD_PLL; /* SB800 needs pre-fetch fix */ diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 3ce71cbfbb58..f6d04491df60 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -132,7 +132,7 @@ static struct amd_chipset_info { struct amd_chipset_type sb_type; int isoc_reqs; int probe_count; - int probe_result; + bool need_pll_quirk; } amd_chipset; static DEFINE_SPINLOCK(amd_lock); @@ -201,11 +201,11 @@ void sb800_prefetch(struct device *dev, int on) } EXPORT_SYMBOL_GPL(sb800_prefetch); -int usb_amd_find_chipset_info(void) +static void usb_amd_find_chipset_info(void) { unsigned long flags; struct amd_chipset_info info; - int ret; + info.need_pll_quirk = 0; spin_lock_irqsave(&amd_lock, flags); @@ -213,27 +213,34 @@ int usb_amd_find_chipset_info(void) if (amd_chipset.probe_count > 0) { amd_chipset.probe_count++; spin_unlock_irqrestore(&amd_lock, flags); - return amd_chipset.probe_result; + return; } memset(&info, 0, sizeof(info)); spin_unlock_irqrestore(&amd_lock, flags); if (!amd_chipset_sb_type_init(&info)) { - ret = 0; goto commit; } - /* Below chipset generations needn't enable AMD PLL quirk */ - if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN || - info.sb_type.gen == AMD_CHIPSET_SB600 || - info.sb_type.gen == AMD_CHIPSET_YANGTZE || - (info.sb_type.gen == AMD_CHIPSET_SB700 && - info.sb_type.rev > 0x3b)) { + switch (info.sb_type.gen) { + case AMD_CHIPSET_SB700: + info.need_pll_quirk = info.sb_type.rev <= 0x3B; + break; + case AMD_CHIPSET_SB800: + case AMD_CHIPSET_HUDSON2: + case AMD_CHIPSET_BOLTON: + info.need_pll_quirk = 1; + break; + default: + info.need_pll_quirk = 0; + break; + } + + if (!info.need_pll_quirk) { if (info.smbus_dev) { pci_dev_put(info.smbus_dev); info.smbus_dev = NULL; } - ret = 0; goto commit; } @@ -252,7 +259,6 @@ int usb_amd_find_chipset_info(void) } } - ret = info.probe_result = 1; printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n"); commit: @@ -263,7 +269,6 @@ commit: /* Mark that we where here */ amd_chipset.probe_count++; - ret = amd_chipset.probe_result; spin_unlock_irqrestore(&amd_lock, flags); @@ -276,10 +281,7 @@ commit: 
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index fe9422d3bcdc..b0882c13a1d1 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -149,7 +149,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
 		break;
 	case PCI_VENDOR_ID_AMD:
 		/* AMD PLL quirk */
-		if (usb_amd_find_chipset_info())
+		if (usb_amd_quirk_pll_check())
 			ehci->amd_pll_fix = 1;
 		/* AMD8111 EHCI doesn't work, according to AMD errata */
 		if (pdev->device == 0x7463) {
@@ -186,7 +186,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
 		break;
 	case PCI_VENDOR_ID_ATI:
 		/* AMD PLL quirk */
-		if (usb_amd_find_chipset_info())
+		if (usb_amd_quirk_pll_check())
 			ehci->amd_pll_fix = 1;
 
 		/*
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 09a8ebd95588..6968b9f2b76b 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -159,7 +159,7 @@ out:
 	return result;
 
 error_set_cluster_id:
-	wusb_cluster_id_put(wusbhc->cluster_id);
+	wusb_cluster_id_put(addr);
 error_cluster_id_get:
 	goto out;
 
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index a033f7d855e0..f4e13a3fddee 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -152,7 +152,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
 {
 	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
 
-	if (usb_amd_find_chipset_info())
+	if (usb_amd_quirk_pll_check())
 		ohci->flags |= OHCI_QUIRK_AMD_PLL;
 
 	/* SB800 needs pre-fetch fix */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 3ce71cbfbb58..f6d04491df60 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -132,7 +132,7 @@ static struct amd_chipset_info {
 	struct amd_chipset_type	sb_type;
 	int isoc_reqs;
 	int probe_count;
-	int probe_result;
+	bool need_pll_quirk;
 } amd_chipset;
 
 static DEFINE_SPINLOCK(amd_lock);
@@ -201,11 +201,11 @@ void sb800_prefetch(struct device *dev, int on)
 }
 EXPORT_SYMBOL_GPL(sb800_prefetch);
 
-int usb_amd_find_chipset_info(void)
+static void usb_amd_find_chipset_info(void)
 {
 	unsigned long flags;
 	struct amd_chipset_info info;
-	int ret;
+	info.need_pll_quirk = 0;
 
 	spin_lock_irqsave(&amd_lock, flags);
@@ -213,27 +213,34 @@ int usb_amd_find_chipset_info(void)
 	if (amd_chipset.probe_count > 0) {
 		amd_chipset.probe_count++;
 		spin_unlock_irqrestore(&amd_lock, flags);
-		return amd_chipset.probe_result;
+		return;
 	}
 	memset(&info, 0, sizeof(info));
 	spin_unlock_irqrestore(&amd_lock, flags);
 
 	if (!amd_chipset_sb_type_init(&info)) {
-		ret = 0;
 		goto commit;
 	}
 
-	/* Below chipset generations needn't enable AMD PLL quirk */
-	if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
-			info.sb_type.gen == AMD_CHIPSET_SB600 ||
-			info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
-			(info.sb_type.gen == AMD_CHIPSET_SB700 &&
-			info.sb_type.rev > 0x3b)) {
+	switch (info.sb_type.gen) {
+	case AMD_CHIPSET_SB700:
+		info.need_pll_quirk = info.sb_type.rev <= 0x3B;
+		break;
+	case AMD_CHIPSET_SB800:
+	case AMD_CHIPSET_HUDSON2:
+	case AMD_CHIPSET_BOLTON:
+		info.need_pll_quirk = 1;
+		break;
+	default:
+		info.need_pll_quirk = 0;
+		break;
+	}
+
+	if (!info.need_pll_quirk) {
 		if (info.smbus_dev) {
 			pci_dev_put(info.smbus_dev);
 			info.smbus_dev = NULL;
 		}
-		ret = 0;
 		goto commit;
 	}
 
@@ -252,7 +259,6 @@ int usb_amd_find_chipset_info(void)
 		}
 	}
 
-	ret = info.probe_result = 1;
 	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
 
 commit:
@@ -263,7 +269,6 @@ commit:
 
 		/* Mark that we where here */
 		amd_chipset.probe_count++;
-		ret = amd_chipset.probe_result;
 
 		spin_unlock_irqrestore(&amd_lock, flags);
 
@@ -276,10 +281,7 @@ commit:
 		amd_chipset = info;
 		spin_unlock_irqrestore(&amd_lock, flags);
 	}
-
-	return ret;
 }
-EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
 
 int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
 {
@@ -315,6 +317,13 @@ bool usb_amd_prefetch_quirk(void)
 }
 EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
 
+bool usb_amd_quirk_pll_check(void)
+{
+	usb_amd_find_chipset_info();
+	return amd_chipset.need_pll_quirk;
+}
+EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_check);
+
 /*
  * The hardware normally enables the A-link power management feature, which
  * lets the system lower the power consumption in idle states.
@@ -520,7 +529,7 @@ void usb_amd_dev_put(void)
 	amd_chipset.nb_type = 0;
 	memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
 	amd_chipset.isoc_reqs = 0;
-	amd_chipset.probe_result = 0;
+	amd_chipset.need_pll_quirk = 0;
 
 	spin_unlock_irqrestore(&amd_lock, flags);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 63c633077d9e..e729de21fad7 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -5,11 +5,11 @@
 #ifdef CONFIG_USB_PCI
 void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
 int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
-int usb_amd_find_chipset_info(void);
 int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev);
 bool usb_amd_hang_symptom_quirk(void);
 bool usb_amd_prefetch_quirk(void);
 void usb_amd_dev_put(void);
+bool usb_amd_quirk_pll_check(void);
 void usb_amd_quirk_pll_disable(void);
 void usb_amd_quirk_pll_enable(void);
 void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
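With the pci-quirks.c rewrite, the PLL-quirk decision reduces to a table over the southbridge generation, and the EHCI/OHCI/xHCI callers ask usb_amd_quirk_pll_check() instead of consuming a cached int result. A standalone restatement of that decision logic; the enum here mirrors the driver's AMD_CHIPSET_* constants but is redeclared purely for illustration:

/* Per-generation AMD PLL quirk decision, as in the switch above. */
#include <stdbool.h>
#include <stdio.h>

enum chipset_gen {
	GEN_UNKNOWN, GEN_SB600, GEN_SB700, GEN_SB800,
	GEN_HUDSON2, GEN_BOLTON, GEN_YANGTZE,
};

static bool need_pll_quirk(enum chipset_gen gen, unsigned int rev)
{
	switch (gen) {
	case GEN_SB700:             /* only early SB700 revisions */
		return rev <= 0x3B;
	case GEN_SB800:
	case GEN_HUDSON2:
	case GEN_BOLTON:
		return true;
	default:                    /* SB600, Yangtze, unknown: no quirk */
		return false;
	}
}

int main(void)
{
	printf("SB700 rev 0x3c: %d\n", need_pll_quirk(GEN_SB700, 0x3c));
	printf("SB800:          %d\n", need_pll_quirk(GEN_SB800, 0));
	return 0;
}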
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c2fe218e051f..1e0236e90687 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -130,7 +130,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 		xhci->quirks |= XHCI_AMD_0x96_HOST;
 
 	/* AMD PLL quirk */
-	if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
+	if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_quirk_pll_check())
 		xhci->quirks |= XHCI_AMD_PLL_FIX;
 
 	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7a264962a1a9..f5c41448d067 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -2175,7 +2175,8 @@ static inline bool xhci_urb_suitable_for_idt(struct urb *urb)
 	if (!usb_endpoint_xfer_isoc(&urb->ep->desc) && usb_urb_dir_out(urb) &&
 	    usb_endpoint_maxp(&urb->ep->desc) >= TRB_IDT_MAX_SIZE &&
 	    urb->transfer_buffer_length <= TRB_IDT_MAX_SIZE &&
-	    !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP))
+	    !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) &&
+	    !urb->num_sgs)
 		return true;
 
 	return false;
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 4d6ae3795a88..6ca9111d150a 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -375,7 +375,8 @@ out_err:
 
 #ifdef CONFIG_OF
 static void usb251xb_get_ports_field(struct usb251xb *hub,
-				    const char *prop_name, u8 port_cnt, u8 *fld)
+				    const char *prop_name, u8 port_cnt,
+				    bool ds_only, u8 *fld)
 {
 	struct device *dev = hub->dev;
 	struct property *prop;
@@ -383,7 +384,7 @@ static void usb251xb_get_ports_field(struct usb251xb *hub,
 	u32 port;
 
 	of_property_for_each_u32(dev->of_node, prop_name, prop, p, port) {
-		if ((port >= 1) && (port <= port_cnt))
+		if ((port >= ds_only ? 1 : 0) && (port <= port_cnt))
 			*fld |= BIT(port);
 		else
 			dev_warn(dev, "port %u doesn't exist\n", port);
@@ -501,15 +502,15 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
 
 	hub->non_rem_dev = USB251XB_DEF_NON_REMOVABLE_DEVICES;
 	usb251xb_get_ports_field(hub, "non-removable-ports", data->port_cnt,
-				 &hub->non_rem_dev);
+				 true, &hub->non_rem_dev);
 
 	hub->port_disable_sp = USB251XB_DEF_PORT_DISABLE_SELF;
 	usb251xb_get_ports_field(hub, "sp-disabled-ports", data->port_cnt,
-				 &hub->port_disable_sp);
+				 true, &hub->port_disable_sp);
 
 	hub->port_disable_bp = USB251XB_DEF_PORT_DISABLE_BUS;
 	usb251xb_get_ports_field(hub, "bp-disabled-ports", data->port_cnt,
-				 &hub->port_disable_bp);
+				 true, &hub->port_disable_bp);
 
 	hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
 	if (!of_property_read_u32(np, "sp-max-total-current-microamp",
@@ -573,9 +574,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
 	 */
 	hub->port_swap = USB251XB_DEF_PORT_SWAP;
 	usb251xb_get_ports_field(hub, "swap-dx-lanes", data->port_cnt,
-				 &hub->port_swap);
-	if (of_get_property(np, "swap-us-lanes", NULL))
-		hub->port_swap |= BIT(0);
+				 false, &hub->port_swap);
 
 	/* The following parameters are currently not exposed to devicetree, but
 	 * may be as soon as needed.
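One caution on the usb251xb hunk: in C, relational operators bind tighter than the conditional operator, so "port >= ds_only ? 1 : 0" parses as "(port >= ds_only) ? 1 : 0" and always evaluates to 0 or 1 rather than comparing port against the intended lower bound. A parenthesized version of the presumably intended check, as a sketch:

/* Intended port-range check with explicit parentheses around ?: */
#include <stdbool.h>
#include <stdio.h>

static bool port_valid(unsigned int port, unsigned int port_cnt, bool ds_only)
{
	/* ds_only fields address downstream ports 1..N; others allow
	 * port 0 (the upstream port) as well. */
	return (port >= (ds_only ? 1 : 0)) && (port <= port_cnt);
}

int main(void)
{
	/* Port 0 should only be valid when ds_only is false. */
	printf("%d %d\n", port_valid(0, 4, true), port_valid(0, 4, false));
	return 0;
}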
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 30790240aec6..05b80211290d 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -28,6 +28,8 @@
  * status of a command.
  */
 
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 
@@ -99,6 +101,7 @@ static int slave_alloc (struct scsi_device *sdev)
 static int slave_configure(struct scsi_device *sdev)
 {
 	struct us_data *us = host_to_us(sdev->host);
+	struct device *dev = us->pusb_dev->bus->sysdev;
 
 	/*
 	 * Many devices have trouble transferring more than 32KB at a time,
@@ -129,6 +132,14 @@ static int slave_configure(struct scsi_device *sdev)
 	}
 
 	/*
+	 * The max_hw_sectors should be up to maximum size of a mapping for
+	 * the device. Otherwise, a DMA API might fail on swiotlb environment.
+	 */
+	blk_queue_max_hw_sectors(sdev->request_queue,
+		min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
+		      dma_max_mapping_size(dev) >> SECTOR_SHIFT));
+
+	/*
 	 * Some USB host controllers can't do DMA; they have to use PIO.
 	 * They indicate this by setting their dma_mask to NULL.  For
 	 * such controllers we need to make sure the block layer sets
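The scsiglue change caps the SCSI queue's max_hw_sectors at the largest contiguous mapping the DMA layer can provide (dma_max_mapping_size()), so a single request never exceeds what a swiotlb bounce buffer can satisfy. The arithmetic, restated standalone; the 256 KiB figure below is only an example of a typical swiotlb per-mapping limit:

/* Clamp a sector count to the DMA layer's maximum mapping size. */
#include <stddef.h>
#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

static size_t clamp_hw_sectors(size_t cur_max_sectors, size_t dma_max_bytes)
{
	size_t dma_max_sectors = dma_max_bytes >> SECTOR_SHIFT;

	return cur_max_sectors < dma_max_sectors ? cur_max_sectors
						 : dma_max_sectors;
}

int main(void)
{
	/* e.g. a 256 KiB single-mapping limit caps 2048 sectors to 512 */
	printf("%zu sectors\n", clamp_hw_sectors(2048, 256 * 1024));
	return 0;
}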
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 819296332913..42a8c2a13ab1 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -96,7 +96,7 @@ struct vhost_uaddr {
 };
 
 #if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 1
+#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
 #else
 #define VHOST_ARCH_CAN_ACCEL_UACCESS 0
 #endif
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 4c339c7e66e5..a446a7221e13 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1143,7 +1143,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 		goto out_put_map;
 
 	if (!use_ptemod) {
-		err = vm_map_pages(vma, map->pages, map->count);
+		err = vm_map_pages_zero(vma, map->pages, map->count);
 		if (err)
 			goto out_put_map;
 	} else {
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 2f5ce7230a43..c6070e70dd73 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -724,25 +724,6 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
 	return 0;
 }
 
-struct remap_pfn {
-	struct mm_struct *mm;
-	struct page **pages;
-	pgprot_t prot;
-	unsigned long i;
-};
-
-static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
-{
-	struct remap_pfn *r = data;
-	struct page *page = r->pages[r->i];
-	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
-
-	set_pte_at(r->mm, addr, ptep, pte);
-	r->i++;
-
-	return 0;
-}
-
 static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
 {
 	struct privcmd_data *data = file->private_data;
@@ -774,7 +755,8 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
 		goto out;
 	}
 
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
+	    xen_feature(XENFEAT_auto_translated_physmap)) {
 		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
 		struct page **pages;
 		unsigned int i;
@@ -808,16 +790,9 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
 	if (rc)
 		goto out;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-		struct remap_pfn r = {
-			.mm = vma->vm_mm,
-			.pages = vma->vm_private_data,
-			.prot = vma->vm_page_prot,
-		};
-
-		rc = apply_to_page_range(r.mm, kdata.addr,
-					 kdata.num << PAGE_SHIFT,
-					 remap_pfn_fn, &r);
+	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
+	    xen_feature(XENFEAT_auto_translated_physmap)) {
+		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
 	} else {
 		unsigned int domid =
 			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index cfbe46785a3b..ae1df496bf38 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -83,34 +83,18 @@ static inline dma_addr_t xen_virt_to_bus(void *address)
 	return xen_phys_to_bus(virt_to_phys(address));
 }
 
-static int check_pages_physically_contiguous(unsigned long xen_pfn,
-					     unsigned int offset,
-					     size_t length)
+static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 {
-	unsigned long next_bfn;
-	int i;
-	int nr_pages;
+	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
+	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
 
 	next_bfn = pfn_to_bfn(xen_pfn);
-	nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;
 
-	for (i = 1; i < nr_pages; i++) {
+	for (i = 1; i < nr_pages; i++)
 		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
-			return 0;
-	}
-	return 1;
-}
+			return 1;
 
-static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
-{
-	unsigned long xen_pfn = XEN_PFN_DOWN(p);
-	unsigned int offset = p & ~XEN_PAGE_MASK;
-
-	if (offset + size <= XEN_PAGE_SIZE)
-		return 0;
-	if (check_pages_physically_contiguous(xen_pfn, offset, size))
-		return 0;
-	return 1;
+	return 0;
 }
 
 static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
@@ -338,6 +322,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
 			return NULL;
 		}
+		SetPageXenRemapped(virt_to_page(ret));
 	}
 	memset(ret, 0, size);
 	return ret;
@@ -361,8 +346,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	/* Convert the size to actually allocated. */
 	size = 1UL << (order + XEN_PAGE_SHIFT);
 
-	if (((dev_addr + size - 1 <= dma_mask)) ||
-	    range_straddles_page_boundary(phys, size))
+	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+		     range_straddles_page_boundary(phys, size)) &&
+	    TestClearPageXenRemapped(virt_to_page(vaddr)))
 		xen_destroy_contiguous_region(phys, order);
 
 	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index 73427d8e0116..e5694133ebe5 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
 {
 	int err;
 	u16 old_value;
-	pci_power_t new_state, old_state;
+	pci_power_t new_state;
 
 	err = pci_read_config_word(dev, offset, &old_value);
 	if (err)
 		goto out;
 
-	old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
 	new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
 
 	new_value &= PM_OK_BITS;
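In the swiotlb-xen hunk above, the old two-helper check is folded into a single loop: a physical range "straddles" when any Xen page it spans translates to a non-consecutive bus frame, in which case the buffer must be bounced. The same loop restated standalone, with a hypothetical pfn_to_bfn() table that has a deliberate discontinuity at frame 100:

/* Contiguity check over a pfn-to-bus-frame mapping, modeled on the
 * rewritten range_straddles_page_boundary(). */
#include <stddef.h>
#include <stdio.h>

#define XEN_PAGE_SHIFT 12
#define XEN_PAGE_SIZE  (1UL << XEN_PAGE_SHIFT)

/* Hypothetical mapping: identity below frame 100, then offset by 8. */
static unsigned long pfn_to_bfn(unsigned long pfn)
{
	return pfn < 100 ? pfn : pfn + 8;
}

static int range_straddles_page_boundary(unsigned long phys, size_t size)
{
	unsigned long pfn = phys >> XEN_PAGE_SHIFT;
	unsigned long next_bfn = pfn_to_bfn(pfn);
	size_t off = phys & (XEN_PAGE_SIZE - 1);
	unsigned long nr = (off + size + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT;
	unsigned long i;

	for (i = 1; i < nr; i++)
		if (pfn_to_bfn(++pfn) != ++next_bfn)
			return 1;	/* bus frames not consecutive */
	return 0;
}

int main(void)
{
	/* two pages crossing the discontinuity: straddles (prints 1) */
	printf("%d\n", range_straddles_page_boundary(99UL << XEN_PAGE_SHIFT,
						     2 * XEN_PAGE_SIZE));
	return 0;
}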
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index ba883a80b3c0..7b1077f0abcb 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -262,3 +262,35 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
+
+struct remap_pfn {
+	struct mm_struct *mm;
+	struct page **pages;
+	pgprot_t prot;
+	unsigned long i;
+};
+
+static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
+{
+	struct remap_pfn *r = data;
+	struct page *page = r->pages[r->i];
+	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
+
+	set_pte_at(r->mm, addr, ptep, pte);
+	r->i++;
+
+	return 0;
+}
+
+/* Used by the privcmd module, but has to be built-in on ARM */
+int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long len)
+{
+	struct remap_pfn r = {
+		.mm = vma->vm_mm,
+		.pages = vma->vm_private_data,
+		.prot = vma->vm_page_prot,
+	};
+
+	return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
+}
+EXPORT_SYMBOL_GPL(xen_remap_vma_range);
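For reference, xen_remap_vma_range() above is a thin wrapper: apply_to_page_range() visits each PTE slot in [addr, addr + len) and remap_pfn_fn() installs a special PTE for the next page from vma->vm_private_data. A toy model of that walk over a flat array instead of real page tables; all names and types here are illustrative, not the kernel's:

/* Callback-driven range walk modeled on apply_to_page_range(). */
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct remap_ctx {
	const unsigned long *pfns;	/* stands in for vma->vm_private_data */
	unsigned long i;
};

/* set_pte_at() analogue: write the next frame number into the slot */
static int remap_fn(unsigned long *ptep, unsigned long addr, void *data)
{
	struct remap_ctx *r = data;

	(void)addr;
	*ptep = r->pfns[r->i++];
	return 0;
}

/* Walk one "PTE" per page in [addr, addr + len), invoking fn on each. */
static int apply_to_range(unsigned long *ptes, unsigned long addr,
			  unsigned long len,
			  int (*fn)(unsigned long *, unsigned long, void *),
			  void *data)
{
	unsigned long a;

	for (a = addr; a < addr + len; a += 1UL << PAGE_SHIFT)
		if (fn(&ptes[(a - addr) >> PAGE_SHIFT], a, data))
			return -1;
	return 0;
}

int main(void)
{
	unsigned long pfns[] = { 7, 8, 42 }, ptes[3] = { 0 };
	struct remap_ctx r = { .pfns = pfns };

	apply_to_range(ptes, 0x1000, 3UL << PAGE_SHIFT, remap_fn, &r);
	printf("%lu %lu %lu\n", ptes[0], ptes[1], ptes[2]);
	return 0;
}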