28 files changed, 417 insertions, 110 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index d5674026512a..a8688a92c760 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -120,6 +120,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
 	if (!chip_ctx)
 		return -ENOMEM;
 	chip_ctx->chip_num = bp->chip_num;
+	chip_ctx->hw_stats_size = bp->hw_ring_stats_size;
 	rdev->chip_ctx = chip_ctx;
 	/* rest members to follow eventually */
@@ -550,6 +551,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
 				       dma_addr_t dma_map,
 				       u32 *fw_stats_ctx_id)
 {
+	struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
 	struct hwrm_stat_ctx_alloc_output resp = {0};
 	struct hwrm_stat_ctx_alloc_input req = {0};
 	struct bnxt_en_dev *en_dev = rdev->en_dev;
@@ -566,7 +568,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
 	req.update_period_ms = cpu_to_le32(1000);
 	req.stats_dma_addr = cpu_to_le64(dma_map);
-	req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
+	req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
 	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
 			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 17f0701b3cee..44282a8cdd4f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -56,6 +56,7 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
 				      struct bnxt_qplib_stats *stats);
 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+				       struct bnxt_qplib_chip_ctx *cctx,
 				       struct bnxt_qplib_stats *stats);
 /* PBL */
@@ -559,7 +560,7 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
 		goto fail;
 stats_alloc:
 	/* Stats */
-	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats);
+	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
 	if (rc)
 		goto fail;
@@ -889,15 +890,12 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
 }
 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+				      struct bnxt_qplib_chip_ctx *cctx,
 				      struct bnxt_qplib_stats *stats)
 {
 	memset(stats, 0, sizeof(*stats));
 	stats->fw_id = -1;
-	/* 128 byte aligned context memory is required only for 57500.
-	 * However making this unconditional, it does not harm previous
-	 * generation.
-	 */
-	stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
+	stats->size = cctx->hw_stats_size;
 	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
 					&stats->dma_map, GFP_KERNEL);
 	if (!stats->dma) {
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index c291f495ae91..91031502e8f5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -54,6 +54,7 @@ struct bnxt_qplib_chip_ctx {
 	u16 chip_num;
 	u8 chip_rev;
 	u8 chip_metal;
+	u16 hw_stats_size;
 	struct bnxt_qplib_drv_modes modes;
 };
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index b1023a7d0bd1..f1e5515256e0 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -2845,7 +2845,7 @@ static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
  * parses fpm commit info and copy base value
  * of hmc objects in hmc_info
  */
-static enum irdma_status_code
+static void
 irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
 			      struct irdma_hmc_obj_info *info, u32 *sd)
 {
@@ -2915,7 +2915,6 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
 	else
 		*sd = (u32)(size >> 21);
-	return 0;
 }
 /**
@@ -4187,11 +4186,9 @@ enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
  * @dev: sc device struct
 * @count: allocate count
 */
-enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
 {
 	writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
-
-	return 0;
 }
 /**
@@ -4434,9 +4431,9 @@ static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev,
 	ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
 					   &commit_fpm_mem, true, wait_type);
 	if (!ret_code)
-		ret_code = irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
-							 hmc_info->hmc_obj,
-							 &hmc_info->sd_table.sd_cnt);
+		irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
+					      hmc_info->hmc_obj,
+					      &hmc_info->sd_table.sd_cnt);
 	print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
 			     8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE, false);
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 7afb8a6a0526..00de5ee9a260 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -1920,7 +1920,7 @@ enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf)
 * irdma_set_hw_rsrc - set hw memory resources.
 * @rf: RDMA PCI function
 */
-static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
+static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
 {
 	rf->allocated_qps = (void *)(rf->mem_rsrc +
 		   (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
@@ -1937,8 +1937,6 @@ static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
 	spin_lock_init(&rf->arp_lock);
 	spin_lock_init(&rf->qptable_lock);
 	spin_lock_init(&rf->qh_list_lock);
-
-	return 0;
 }
 /**
@@ -2000,9 +1998,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
 	rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
-	ret = irdma_set_hw_rsrc(rf);
-	if (ret)
-		goto set_hw_rsrc_fail;
+	irdma_set_hw_rsrc(rf);
 	set_bit(0, rf->allocated_mrs);
 	set_bit(0, rf->allocated_qps);
@@ -2025,9 +2021,6 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
 	return 0;
-set_hw_rsrc_fail:
-	kfree(rf->mem_rsrc);
-	rf->mem_rsrc = NULL;
 mem_rsrc_kzalloc_fail:
 	kfree(rf->allocated_ws_nodes);
 	rf->allocated_ws_nodes = NULL;
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index ea59432351fb..51a41359e0b4 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -215,10 +215,10 @@ static void irdma_remove(struct auxiliary_device *aux_dev)
 	pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
 }
-static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf)
+static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf,
+				   struct ice_vsi *vsi)
 {
 	struct irdma_pci_f *rf = iwdev->rf;
-	struct ice_vsi *vsi = ice_get_main_vsi(pf);
 	rf->cdev = pf;
 	rf->gen_ops.register_qset = irdma_lan_register_qset;
@@ -253,12 +253,15 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
 						    struct iidc_auxiliary_dev,
 						    adev);
 	struct ice_pf *pf = iidc_adev->pf;
+	struct ice_vsi *vsi = ice_get_main_vsi(pf);
 	struct iidc_qos_params qos_info = {};
 	struct irdma_device *iwdev;
 	struct irdma_pci_f *rf;
 	struct irdma_l2params l2params = {};
 	int err;
+	if (!vsi)
+		return -EIO;
 	iwdev = ib_alloc_device(irdma_device, ibdev);
 	if (!iwdev)
 		return -ENOMEM;
@@ -268,7 +271,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
 		return -ENOMEM;
 	}
-	irdma_fill_device_info(iwdev, pf);
+	irdma_fill_device_info(iwdev, pf, vsi);
 	rf = iwdev->rf;
 	if (irdma_ctrl_init_hw(rf)) {
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 7387b83e826d..874bc25a938b 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -1222,8 +1222,7 @@ enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
 					 struct irdma_aeq_init_info *info);
 enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
 					      struct irdma_aeqe_info *info);
-enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev,
-						   u32 count);
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
 void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
 		      int abi_ver);
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index a6d52c20091c..5fb92de1f015 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -931,7 +931,7 @@ enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
 enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
 					     struct irdma_post_rq_info *info)
 {
-	u32 total_size = 0, wqe_idx, i, byte_off;
+	u32 wqe_idx, i, byte_off;
 	u32 addl_frag_cnt;
 	__le64 *wqe;
 	u64 hdr;
@@ -939,9 +939,6 @@ enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
 	if (qp->max_rq_frag_cnt < info->num_sges)
 		return IRDMA_ERR_INVALID_FRAG_COUNT;
-	for (i = 0; i < info->num_sges; i++)
-		total_size += info->sg_list[i].len;
-
 	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
 	if (!wqe)
 		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 9712f6902ba8..717147ed0519 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -557,7 +557,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 * @iwqp: qp ptr
 * @init_info: initialize info to return
 */
-static int irdma_setup_virt_qp(struct irdma_device *iwdev,
+static void irdma_setup_virt_qp(struct irdma_device *iwdev,
 			       struct irdma_qp *iwqp,
 			       struct irdma_qp_init_info *init_info)
 {
@@ -574,8 +574,6 @@ static int irdma_setup_virt_qp(struct irdma_device *iwdev,
 		init_info->sq_pa = qpmr->sq_pbl.addr;
 		init_info->rq_pa = qpmr->rq_pbl.addr;
 	}
-
-	return 0;
 }
 /**
@@ -914,7 +912,7 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
 			}
 		}
 		init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
-		err_code = irdma_setup_virt_qp(iwdev, iwqp, &init_info);
+		irdma_setup_virt_qp(iwdev, iwqp, &init_info);
 	} else {
 		init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
 		err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 6aabcb4de235..be4bcb420fab 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -113,13 +113,14 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	int num_buf;
 	void *vaddr;
 	int err;
+	int i;
 	umem = ib_umem_get(pd->ibpd.device, start, length, access);
 	if (IS_ERR(umem)) {
-		pr_warn("err %d from rxe_umem_get\n",
-			(int)PTR_ERR(umem));
+		pr_warn("%s: Unable to pin memory region err = %d\n",
+			__func__, (int)PTR_ERR(umem));
 		err = PTR_ERR(umem);
-		goto err1;
+		goto err_out;
 	}
 	mr->umem = umem;
@@ -129,9 +130,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	err = rxe_mr_alloc(mr, num_buf);
 	if (err) {
-		pr_warn("err %d from rxe_mr_alloc\n", err);
-		ib_umem_release(umem);
-		goto err1;
+		pr_warn("%s: Unable to allocate memory for map\n",
+			__func__);
+		goto err_release_umem;
 	}
 	mr->page_shift = PAGE_SHIFT;
@@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 			vaddr = page_address(sg_page_iter_page(&sg_iter));
 			if (!vaddr) {
-				pr_warn("null vaddr\n");
-				ib_umem_release(umem);
+				pr_warn("%s: Unable to get virtual address\n",
+					__func__);
 				err = -ENOMEM;
-				goto err1;
+				goto err_cleanup_map;
 			}
 			buf->addr = (uintptr_t)vaddr;
@@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	return 0;
-err1:
+err_cleanup_map:
+	for (i = 0; i < mr->num_map; i++)
+		kfree(mr->map[i]);
+	kfree(mr->map);
+err_release_umem:
+	ib_umem_release(umem);
+err_out:
 	return err;
 }
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index b9da58ee9b1e..3481479a2942 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -46,34 +46,79 @@
 #define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
 #define AMD_PMC_RESULT_FAILED 0xFF
+/* FCH SSC Registers */
+#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
+#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
+#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
+#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
+#define FCH_SSC_MAPPING_SIZE 0x800
+#define FCH_BASE_PHY_ADDR_LOW 0xFED81100
+#define FCH_BASE_PHY_ADDR_HIGH 0x00000000
+
+/* SMU Message Definations */
+#define SMU_MSG_GETSMUVERSION 0x02
+#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
+#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
+#define SMU_MSG_LOG_START 0x06
+#define SMU_MSG_LOG_RESET 0x07
+#define SMU_MSG_LOG_DUMP_DATA 0x08
+#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
 /* List of supported CPU ids */
 #define AMD_CPU_ID_RV 0x15D0
 #define AMD_CPU_ID_RN 0x1630
 #define AMD_CPU_ID_PCO AMD_CPU_ID_RV
 #define AMD_CPU_ID_CZN AMD_CPU_ID_RN
+#define AMD_CPU_ID_YC 0x14B5
-#define AMD_SMU_FW_VERSION 0x0
 #define PMC_MSG_DELAY_MIN_US 100
 #define RESPONSE_REGISTER_LOOP_MAX 200
+#define SOC_SUBSYSTEM_IP_MAX 12
+#define DELAY_MIN_US 2000
+#define DELAY_MAX_US 3000
 enum amd_pmc_def {
 	MSG_TEST = 0x01,
 	MSG_OS_HINT_PCO,
 	MSG_OS_HINT_RN,
 };
+struct amd_pmc_bit_map {
+	const char *name;
+	u32 bit_mask;
+};
+
+static const struct amd_pmc_bit_map soc15_ip_blk[] = {
+	{"DISPLAY", BIT(0)},
+	{"CPU", BIT(1)},
+	{"GFX", BIT(2)},
+	{"VDD", BIT(3)},
+	{"ACP", BIT(4)},
+	{"VCN", BIT(5)},
+	{"ISP", BIT(6)},
+	{"NBIO", BIT(7)},
+	{"DF", BIT(8)},
+	{"USB0", BIT(9)},
+	{"USB1", BIT(10)},
+	{"LAPIC", BIT(11)},
+	{}
+};
+
 struct amd_pmc_dev {
 	void __iomem *regbase;
-	void __iomem *smu_base;
+	void __iomem *smu_virt_addr;
+	void __iomem *fch_virt_addr;
 	u32 base_addr;
 	u32 cpu_id;
+	u32 active_ips;
 	struct device *dev;
+	struct mutex lock; /* generic mutex lock */
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 	struct dentry *dbgfs_dir;
 #endif /* CONFIG_DEBUG_FS */
 };
 static struct amd_pmc_dev pmc;
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret);
 static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
 {
@@ -85,18 +130,77 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
 	iowrite32(val, dev->regbase + reg_offset);
 }
+struct smu_metrics {
+	u32 table_version;
+	u32 hint_count;
+	u32 s0i3_cyclecount;
+	u32 timein_s0i2;
+	u64 timeentering_s0i3_lastcapture;
+	u64 timeentering_s0i3_totaltime;
+	u64 timeto_resume_to_os_lastcapture;
+	u64 timeto_resume_to_os_totaltime;
+	u64 timein_s0i3_lastcapture;
+	u64 timein_s0i3_totaltime;
+	u64 timein_swdrips_lastcapture;
+	u64 timein_swdrips_totaltime;
+	u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX];
+	u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
+} __packed;
+
 #ifdef CONFIG_DEBUG_FS
 static int smu_fw_info_show(struct seq_file *s, void *unused)
 {
 	struct amd_pmc_dev *dev = s->private;
-	u32 value;
+	struct smu_metrics table;
+	int idx;
+
+	if (dev->cpu_id == AMD_CPU_ID_PCO)
+		return -EINVAL;
+
+	memcpy_fromio(&table, dev->smu_virt_addr, sizeof(struct smu_metrics));
+
+	seq_puts(s, "\n=== SMU Statistics ===\n");
+	seq_printf(s, "Table Version: %d\n", table.table_version);
+	seq_printf(s, "Hint Count: %d\n", table.hint_count);
+	seq_printf(s, "S0i3 Cycle Count: %d\n", table.s0i3_cyclecount);
+	seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
+	seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
+
+	seq_puts(s, "\n=== Active time (in us) ===\n");
+	for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) {
+		if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
+			seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
+				   table.timecondition_notmet_lastcapture[idx]);
+	}
-	value = ioread32(dev->smu_base + AMD_SMU_FW_VERSION);
-	seq_printf(s, "SMU FW Info: %x\n", value);
 	return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
+static int s0ix_stats_show(struct seq_file *s, void *unused)
+{
+	struct amd_pmc_dev *dev = s->private;
+	u64 entry_time, exit_time, residency;
+
+	entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
+	entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);
+
+	exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
+	exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);
+
+	/* It's in 48MHz. We need to convert it */
+	residency = exit_time - entry_time;
+	do_div(residency, 48);
+
+	seq_puts(s, "=== S0ix statistics ===\n");
+	seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
+	seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
+	seq_printf(s, "Residency Time: %lld\n", residency);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
+
 static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
 {
 	debugfs_remove_recursive(dev->dbgfs_dir);
@@ -107,6 +211,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
 	dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
 	debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
 			    &smu_fw_info_fops);
+	debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
+			    &s0ix_stats_fops);
 }
 #else
 static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
@@ -118,6 +224,32 @@ static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
 }
 #endif /* CONFIG_DEBUG_FS */
+static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
+{
+	u32 phys_addr_low, phys_addr_hi;
+	u64 smu_phys_addr;
+
+	if (dev->cpu_id == AMD_CPU_ID_PCO)
+		return -EINVAL;
+
+	/* Get Active devices list from SMU */
+	amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1);
+
+	/* Get dram address */
+	amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1);
+	amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1);
+	smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
+
+	dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, sizeof(struct smu_metrics));
+	if (!dev->smu_virt_addr)
+		return -ENOMEM;
+
+	/* Start the logging */
+	amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0);
+
+	return 0;
+}
+
 static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
 {
 	u32 value;
@@ -132,19 +264,19 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
 	dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
 }
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret)
 {
 	int rc;
-	u8 msg;
 	u32 val;
+	mutex_lock(&dev->lock);
 	/* Wait until we get a valid response */
 	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
-				val, val > 0, PMC_MSG_DELAY_MIN_US,
+				val, val != 0, PMC_MSG_DELAY_MIN_US,
 				PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
 	if (rc) {
 		dev_err(dev->dev, "failed to talk to SMU\n");
-		return rc;
+		goto out_unlock;
 	}
 	/* Write zero to response register */
@@ -154,34 +286,91 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
 	amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, set);
 	/* Write message ID to message ID register */
-	msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO;
 	amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
-	return 0;
+
+	/* Wait until we get a valid response */
+	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
+				val, val != 0, PMC_MSG_DELAY_MIN_US,
+				PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+	if (rc) {
+		dev_err(dev->dev, "SMU response timed out\n");
+		goto out_unlock;
+	}
+
+	switch (val) {
+	case AMD_PMC_RESULT_OK:
+		if (ret) {
+			/* PMFW may take longer time to return back the data */
+			usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
+			*data = amd_pmc_reg_read(dev, AMD_PMC_REGISTER_ARGUMENT);
+		}
+		break;
+	case AMD_PMC_RESULT_CMD_REJECT_BUSY:
+		dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
+		rc = -EBUSY;
+		goto out_unlock;
+	case AMD_PMC_RESULT_CMD_UNKNOWN:
+		dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
+		rc = -EINVAL;
+		goto out_unlock;
+	case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
+	case AMD_PMC_RESULT_FAILED:
+	default:
+		dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
+		rc = -EIO;
+		goto out_unlock;
+	}
+
+out_unlock:
+	mutex_unlock(&dev->lock);
+	amd_pmc_dump_registers(dev);
+	return rc;
+}
+
+static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
+{
+	switch (dev->cpu_id) {
+	case AMD_CPU_ID_PCO:
+		return MSG_OS_HINT_PCO;
+	case AMD_CPU_ID_RN:
+	case AMD_CPU_ID_YC:
+		return MSG_OS_HINT_RN;
+	}
+	return -EINVAL;
 }
 static int __maybe_unused amd_pmc_suspend(struct device *dev)
 {
 	struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
 	int rc;
+	u8 msg;
+
+	/* Reset and Start SMU logging - to monitor the s0i3 stats */
+	amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0);
+	amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0);
-	rc = amd_pmc_send_cmd(pdev, 1);
+	msg = amd_pmc_get_os_hint(pdev);
+	rc = amd_pmc_send_cmd(pdev, 1, NULL, msg, 0);
 	if (rc)
 		dev_err(pdev->dev, "suspend failed\n");
-	amd_pmc_dump_registers(pdev);
-	return 0;
+	return rc;
 }
 static int __maybe_unused amd_pmc_resume(struct device *dev)
 {
 	struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
 	int rc;
+	u8 msg;
+
+	/* Let SMU know that we are looking for stats */
+	amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
-	rc = amd_pmc_send_cmd(pdev, 0);
+	msg = amd_pmc_get_os_hint(pdev);
+	rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
 	if (rc)
 		dev_err(pdev->dev, "resume failed\n");
-	amd_pmc_dump_registers(pdev);
 	return 0;
 }
@@ -190,6 +379,7 @@ static const struct dev_pm_ops amd_pmc_pm_ops = {
 };
 static const struct pci_device_id pmc_pci_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
@@ -201,9 +391,8 @@ static int amd_pmc_probe(struct platform_device *pdev)
 {
 	struct amd_pmc_dev *dev = &pmc;
 	struct pci_dev *rdev;
-	u32 base_addr_lo;
-	u32 base_addr_hi;
-	u64 base_addr;
+	u32 base_addr_lo, base_addr_hi;
+	u64 base_addr, fch_phys_addr;
 	int err;
 	u32 val;
@@ -248,16 +437,25 @@ static int amd_pmc_probe(struct platform_device *pdev)
 	pci_dev_put(rdev);
 	base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
-	dev->smu_base = devm_ioremap(dev->dev, base_addr, AMD_PMC_MAPPING_SIZE);
-	if (!dev->smu_base)
-		return -ENOMEM;
-
 	dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
 				    AMD_PMC_MAPPING_SIZE);
 	if (!dev->regbase)
 		return -ENOMEM;
-	amd_pmc_dump_registers(dev);
+	mutex_init(&dev->lock);
+
+	/* Use FCH registers to get the S0ix stats */
+	base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
+	base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
+	fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+	dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
+	if (!dev->fch_virt_addr)
+		return -ENOMEM;
+
+	/* Use SMU to get the s0i3 debug stats */
+	err = amd_pmc_setup_smu_logging(dev);
+	if (err)
+		dev_err(dev->dev, "SMU debugging info not supported on this platform\n");
 	platform_set_drvdata(pdev, dev);
 	amd_pmc_dbgfs_register(dev);
@@ -269,11 +467,14 @@ static int amd_pmc_remove(struct platform_device *pdev)
 	struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
 	amd_pmc_dbgfs_unregister(dev);
+	mutex_destroy(&dev->lock);
 	return 0;
 }
 static const struct acpi_device_id amd_pmc_acpi_ids[] = {
 	{"AMDI0005", 0},
+	{"AMDI0006", 0},
+	{"AMDI0007", 0},
 	{"AMD0004", 0},
 	{ }
 };
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 5529d7b0abea..fbb224a82e34 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -141,6 +141,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
 static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
+	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 078648a9201b..e5fbe017f8e1 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -25,6 +25,7 @@ static const struct acpi_device_id intel_hid_ids[] = {
 	{"INT33D5", 0},
 	{"INTC1051", 0},
 	{"INTC1054", 0},
+	{"INTC1070", 0},
 	{"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 3671b5d20613..6cfed4427fb0 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -571,6 +571,11 @@ static ssize_t current_value_store(struct kobject *kobj,
 	else
 		ret = tlmi_save_bios_settings("");
+	if (!ret && !tlmi_priv.pending_changes) {
+		tlmi_priv.pending_changes = true;
+		/* let userland know it may need to check reboot pending again */
+		kobject_uevent(&tlmi_priv.class_dev->kobj, KOBJ_CHANGE);
+	}
 out:
 	kfree(auth_str);
 	kfree(set_str);
@@ -647,6 +652,14 @@ static struct kobj_type tlmi_pwd_setting_ktype = {
 	.sysfs_ops = &tlmi_kobj_sysfs_ops,
 };
+static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr,
+				   char *buf)
+{
+	return sprintf(buf, "%d\n", tlmi_priv.pending_changes);
+}
+
+static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
+
 /* ---- Initialisation --------------------------------------------------------- */
 static void tlmi_release_attr(void)
 {
@@ -659,6 +672,7 @@ static void tlmi_release_attr(void)
 			kobject_put(&tlmi_priv.setting[i]->kobj);
 		}
 	}
+	sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
 	kset_unregister(tlmi_priv.attribute_kset);
 	/* Authentication structures */
@@ -709,8 +723,8 @@ static int tlmi_sysfs_init(void)
 		/* Build attribute */
 		tlmi_priv.setting[i]->kobj.kset = tlmi_priv.attribute_kset;
-		ret = kobject_init_and_add(&tlmi_priv.setting[i]->kobj, &tlmi_attr_setting_ktype,
-					   NULL, "%s", tlmi_priv.setting[i]->display_name);
+		ret = kobject_add(&tlmi_priv.setting[i]->kobj, NULL,
+				  "%s", tlmi_priv.setting[i]->display_name);
 		if (ret)
 			goto fail_create_attr;
@@ -719,6 +733,10 @@ static int tlmi_sysfs_init(void)
 			goto fail_create_attr;
 	}
+	ret = sysfs_create_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
+	if (ret)
+		goto fail_create_attr;
+
 	/* Create authentication entries */
 	tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL,
 							    &tlmi_priv.class_dev->kobj);
@@ -727,8 +745,7 @@ static int tlmi_sysfs_init(void)
 		goto fail_create_attr;
 	}
 	tlmi_priv.pwd_admin->kobj.kset = tlmi_priv.authentication_kset;
-	ret = kobject_init_and_add(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype,
-				   NULL, "%s", "Admin");
+	ret = kobject_add(&tlmi_priv.pwd_admin->kobj, NULL, "%s", "Admin");
 	if (ret)
 		goto fail_create_attr;
@@ -737,8 +754,7 @@ static int tlmi_sysfs_init(void)
 		goto fail_create_attr;
 	tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset;
-	ret = kobject_init_and_add(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype,
-				   NULL, "%s", "System");
+	ret = kobject_add(&tlmi_priv.pwd_power->kobj, NULL, "%s", "System");
 	if (ret)
 		goto fail_create_attr;
@@ -818,6 +834,7 @@ static int tlmi_analyze(void)
 				pr_info("Error retrieving possible values for %d : %s\n",
 					i, setting->display_name);
 		}
+		kobject_init(&setting->kobj, &tlmi_attr_setting_ktype);
 		tlmi_priv.setting[i] = setting;
 		tlmi_priv.settings_count++;
 		kfree(item);
@@ -844,10 +861,12 @@ static int tlmi_analyze(void)
 	if (pwdcfg.password_state & TLMI_PAP_PWD)
 		tlmi_priv.pwd_admin->valid = true;
+	kobject_init(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype);
+
 	tlmi_priv.pwd_power = kzalloc(sizeof(struct tlmi_pwd_setting), GFP_KERNEL);
 	if (!tlmi_priv.pwd_power) {
 		ret = -ENOMEM;
-		goto fail_clear_attr;
+		goto fail_free_pwd_admin;
 	}
 	strscpy(tlmi_priv.pwd_power->kbdlang, "us", TLMI_LANG_MAXLEN);
 	tlmi_priv.pwd_power->encoding = TLMI_ENCODING_ASCII;
@@ -859,11 +878,19 @@ static int tlmi_analyze(void)
 	if (pwdcfg.password_state & TLMI_POP_PWD)
 		tlmi_priv.pwd_power->valid = true;
+	kobject_init(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype);
+
 	return 0;
+fail_free_pwd_admin:
+	kfree(tlmi_priv.pwd_admin);
 fail_clear_attr:
-	for (i = 0; i < TLMI_SETTINGS_COUNT; ++i)
-		kfree(tlmi_priv.setting[i]);
+	for (i = 0; i < TLMI_SETTINGS_COUNT; ++i) {
+		if (tlmi_priv.setting[i]) {
+			kfree(tlmi_priv.setting[i]->possible_values);
+			kfree(tlmi_priv.setting[i]);
+		}
+	}
 	return ret;
 }
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
index 6fa8da7af6c7..eb598846628a 100644
--- a/drivers/platform/x86/think-lmi.h
+++ b/drivers/platform/x86/think-lmi.h
@@ -60,6 +60,7 @@ struct think_lmi {
 	bool can_get_bios_selections;
 	bool can_set_bios_password;
 	bool can_get_password_settings;
+	bool pending_changes;
 	struct tlmi_attr_setting *setting[TLMI_SETTINGS_COUNT];
 	struct device *class_dev;
diff --git a/drivers/platform/x86/wireless-hotkey.c b/drivers/platform/x86/wireless-hotkey.c
index b010e4ca3383..11c60a273446 100644
--- a/drivers/platform/x86/wireless-hotkey.c
+++ b/drivers/platform/x86/wireless-hotkey.c
@@ -78,7 +78,7 @@ static int wl_add(struct acpi_device *device)
 	err = wireless_input_setup();
 	if (err)
-		pr_err("Failed to setup hp wireless hotkeys\n");
+		pr_err("Failed to setup wireless hotkeys\n");
 	return err;
 }
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 14292dba3a12..2c2f179b6977 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -106,12 +106,11 @@ static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
 	return err;
 }
-static bool ext2_check_page(struct page *page, int quiet)
+static bool ext2_check_page(struct page *page, int quiet, char *kaddr)
 {
 	struct inode *dir = page->mapping->host;
 	struct super_block *sb = dir->i_sb;
 	unsigned chunk_size = ext2_chunk_size(dir);
-	char *kaddr = page_address(page);
 	u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
 	unsigned offs, rec_len;
 	unsigned limit = PAGE_SIZE;
@@ -205,7 +204,8 @@ static struct page * ext2_get_page(struct inode *dir, unsigned long n,
 	if (!IS_ERR(page)) {
 		*page_addr = kmap_local_page(page);
 		if (unlikely(!PageChecked(page))) {
-			if (PageError(page) || !ext2_check_page(page, quiet))
+			if (PageError(page) || !ext2_check_page(page, quiet,
+								*page_addr))
 				goto fail;
 		}
 	}
@@ -584,10 +584,10 @@ out_unlock:
 * ext2_delete_entry deletes a directory entry by merging it with the
 * previous entry. Page is up-to-date.
 */
-int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
+int ext2_delete_entry (struct ext2_dir_entry_2 *dir, struct page *page,
+			char *kaddr)
 {
 	struct inode *inode = page->mapping->host;
-	char *kaddr = page_address(page);
 	unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
 	unsigned to = ((char *)dir - kaddr) +
 				ext2_rec_len_from_disk(dir->rec_len);
@@ -607,7 +607,7 @@ int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
 		de = ext2_next_entry(de);
 	}
 	if (pde)
-		from = (char*)pde - (char*)page_address(page);
+		from = (char *)pde - kaddr;
 	pos = page_offset(page) + from;
 	lock_page(page);
 	err = ext2_prepare_chunk(page, pos, to - from);
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index b0a694820cb7..e512630cb63e 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -740,7 +740,8 @@ extern int ext2_inode_by_name(struct inode *dir,
 extern int ext2_make_empty(struct inode *, struct inode *);
 extern struct ext2_dir_entry_2 *ext2_find_entry(struct inode *, const struct qstr *, struct page **, void **res_page_addr);
-extern int ext2_delete_entry (struct ext2_dir_entry_2 *, struct page *);
+extern int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct page *page,
+			     char *kaddr);
 extern int ext2_empty_dir (struct inode *);
 extern struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct page **p, void **pa);
 extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, void *,
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 1f69b81655b6..5f6b7560eb3f 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -293,7 +293,7 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry)
 		goto out;
 	}
-	err = ext2_delete_entry (de, page);
+	err = ext2_delete_entry (de, page, page_addr);
 	ext2_put_page(page, page_addr);
 	if (err)
 		goto out;
@@ -397,7 +397,7 @@ static int ext2_rename (struct user_namespace * mnt_userns,
 	old_inode->i_ctime = current_time(old_inode);
 	mark_inode_dirty(old_inode);
-	ext2_delete_entry(old_de, old_page);
+	ext2_delete_entry(old_de, old_page, old_page_addr);
 	if (dir_de) {
 		if (old_dir != new_dir)
diff --git a/fs/internal.h b/fs/internal.h
index 3ce8edbaa3ca..82e8eb32ff3d 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -61,7 +61,6 @@ extern void __init chrdev_init(void);
 */
 extern const struct fs_context_operations legacy_fs_context_ops;
 extern int parse_monolithic_mount_data(struct fs_context *, void *);
-extern void fc_drop_locked(struct fs_context *);
 extern void vfs_clean_context(struct fs_context *fc);
 extern int finish_clean_context(struct fs_context *fc);
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 476a7ff49482..ef42729216d1 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -387,6 +387,24 @@ void pathrelse(struct treepath *search_path)
 	search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
 }
+static int has_valid_deh_location(struct buffer_head *bh, struct item_head *ih)
+{
+	struct reiserfs_de_head *deh;
+	int i;
+
+	deh = B_I_DEH(bh, ih);
+	for (i = 0; i < ih_entry_count(ih); i++) {
+		if (deh_location(&deh[i]) > ih_item_len(ih)) {
+			reiserfs_warning(NULL, "reiserfs-5094",
+					 "directory entry location seems wrong %h",
+					 &deh[i]);
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
 static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
 {
 	struct block_head *blkh;
@@ -454,11 +472,14 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
 					 "(second one): %h", ih);
 			return 0;
 		}
-		if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) {
-			reiserfs_warning(NULL, "reiserfs-5093",
-					 "item entry count seems wrong %h",
-					 ih);
-			return 0;
+		if (is_direntry_le_ih(ih)) {
+			if (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE)) {
+				reiserfs_warning(NULL, "reiserfs-5093",
+						 "item entry count seems wrong %h",
+						 ih);
+				return 0;
+			}
+			return has_valid_deh_location(bh, ih);
 		}
 		prev_location = ih_location(ih);
 	}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 3ffafc73acf0..58481f8d63d5 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2082,6 +2082,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
 		unlock_new_inode(root_inode);
 	}
+	if (!S_ISDIR(root_inode->i_mode) || !inode_get_bytes(root_inode) ||
+	    !root_inode->i_size) {
+		SWARN(silent, s, "", "corrupt root inode, run fsck");
+		iput(root_inode);
+		errval = -EUCLEAN;
+		goto error;
+	}
+
 	s->s_root = d_make_root(root_inode);
 	if (!s->s_root)
 		goto error;
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index e2bc16300c82..6b54982fc5f3 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -141,6 +141,7 @@ extern int vfs_get_tree(struct fs_context *fc);
 extern void put_fs_context(struct fs_context *fc);
 extern int vfs_parse_fs_param_source(struct fs_context *fc,
 				     struct fs_parameter *param);
+extern void fc_drop_locked(struct fs_context *fc);
 /*
 * sget() wrappers to be called from the ->get_tree() op.
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index e33997b4d750..edc346a77c91 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: LGPL-2.1 WITH Linux-syscall-note */
 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
 #ifndef _USR_IDXD_H_
 #define _USR_IDXD_H_
diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h
index 26b638a7ad97..a7085e092d34 100644
--- a/include/uapi/rdma/irdma-abi.h
+++ b/include/uapi/rdma/irdma-abi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB */
 /*
 * Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 8d6bf56ed77a..de2c432dee20 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -1221,9 +1221,7 @@ int cgroup1_get_tree(struct fs_context *fc)
 		ret = cgroup_do_get_tree(fc);
 	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
-		struct super_block *sb = fc->root->d_sb;
-		dput(fc->root);
-		deactivate_locked_super(sb);
+		fc_drop_locked(fc);
 		ret = 1;
 	}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 50142fc08902..f148eacda55a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3676,15 +3676,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 						  unbound_release_work);
 	struct workqueue_struct *wq = pwq->wq;
 	struct worker_pool *pool = pwq->pool;
-	bool is_last;
+	bool is_last = false;
-	if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
-		return;
+	/*
+	 * when @pwq is not linked, it doesn't hold any reference to the
+	 * @wq, and @wq is invalid to access.
+	 */
+	if (!list_empty(&pwq->pwqs_node)) {
+		if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+			return;
-	mutex_lock(&wq->mutex);
-	list_del_rcu(&pwq->pwqs_node);
-	is_last = list_empty(&wq->pwqs);
-	mutex_unlock(&wq->mutex);
+		mutex_lock(&wq->mutex);
+		list_del_rcu(&pwq->pwqs_node);
+		is_last = list_empty(&wq->pwqs);
+		mutex_unlock(&wq->mutex);
+	}
 	mutex_lock(&wq_pool_mutex);
 	put_unbound_pool(pool);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 23c92ad15c61..ba7ced947e51 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1526,6 +1526,53 @@ out:
 	return err;
 }
+static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+
+	/*
+	 * Garbage collection of unix sockets starts by selecting a set of
+	 * candidate sockets which have reference only from being in flight
+	 * (total_refs == inflight_refs).  This condition is checked once during
+	 * the candidate collection phase, and candidates are marked as such, so
+	 * that non-candidates can later be ignored.  While inflight_refs is
+	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
+	 * is an instantaneous decision.
+	 *
+	 * Once a candidate, however, the socket must not be reinstalled into a
+	 * file descriptor while the garbage collection is in progress.
+	 *
+	 * If the above conditions are met, then the directed graph of
+	 * candidates (*) does not change while unix_gc_lock is held.
+	 *
+	 * Any operations that changes the file count through file descriptors
+	 * (dup, close, sendmsg) does not change the graph since candidates are
+	 * not installed in fds.
+	 *
+	 * Dequeing a candidate via recvmsg would install it into an fd, but
+	 * that takes unix_gc_lock to decrement the inflight count, so it's
+	 * serialized with garbage collection.
+	 *
+	 * MSG_PEEK is special in that it does not change the inflight count,
+	 * yet does install the socket into an fd.  The following lock/unlock
+	 * pair is to ensure serialization with garbage collection.  It must be
+	 * done between incrementing the file count and installing the file into
+	 * an fd.
+	 *
+	 * If garbage collection starts after the barrier provided by the
+	 * lock/unlock, then it will see the elevated refcount and not mark this
+	 * as a candidate.  If a garbage collection is already in progress
+	 * before the file count was incremented, then the lock/unlock pair will
+	 * ensure that garbage collection is finished before progressing to
+	 * installing the fd.
+	 *
+	 * (*) A -> B where B is on the queue of A or B is on the queue of C
+	 * which is on the queue of listening socket A.
+	 */
+	spin_lock(&unix_gc_lock);
+	spin_unlock(&unix_gc_lock);
+}
+
 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
 {
 	int err = 0;
@@ -2175,7 +2222,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 		sk_peek_offset_fwd(sk, size);
 		if (UNIXCB(skb).fp)
-			scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+			unix_peek_fds(&scm, skb);
 	}
 	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
@@ -2418,7 +2465,7 @@ unlock:
 			/* It is questionable, see note in unix_dgram_recvmsg. */
 			if (UNIXCB(skb).fp)
-				scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+				unix_peek_fds(&scm, skb);
 			sk_peek_offset_fwd(sk, chunk);