 Documentation/ABI/testing/sysfs-kernel-iommu_groups   |  30
 Documentation/devicetree/bindings/iommu/arm,smmu.yaml |   9
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c           |   2
 drivers/iommu/arm/arm-smmu/arm-smmu-impl.c            |  13
 drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c          |  17
 drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c            | 270
 drivers/iommu/arm/arm-smmu/arm-smmu.c                 |  51
 drivers/iommu/arm/arm-smmu/arm-smmu.h                 |   5
 drivers/iommu/intel/iommu.c                           |   7
 drivers/iommu/io-pgtable-arm.c                        |  10
 drivers/iommu/iommu.c                                 | 248
 include/linux/io-pgtable.h                            |   8
 include/linux/iommu.h                                 |   1
 13 files changed, 607 insertions(+), 64 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-kernel-iommu_groups b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
index 017f5bc3920c..0fedbb0f94e4 100644
--- a/Documentation/ABI/testing/sysfs-kernel-iommu_groups
+++ b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
@@ -33,3 +33,33 @@ Description:	In case an RMRR is used only by graphics or USB devices
 		it is now exposed as "direct-relaxable" instead of "direct".
 		In device assignment use case, for instance, those RMRR
 		are considered to be relaxable and safe.
+
+What:		/sys/kernel/iommu_groups/<grp_id>/type
+Date:		November 2020
+KernelVersion:	v5.11
+Contact:	Sai Praneeth Prakhya <[email protected]>
+Description:	/sys/kernel/iommu_groups/<grp_id>/type shows the type of the
+		default domain in use by the iommu for this group. See
+		include/linux/iommu.h for possible read values. A privileged
+		user can request the kernel to change the group type by
+		writing to this file. Valid write values:
+
+		======== ======================================================
+		DMA	 All the DMA transactions from the device in this group
+			 are translated by the iommu.
+		identity All the DMA transactions from the device in this group
+			 are not translated by the iommu.
+		auto	 Change to the type the device was booted with.
+		======== ======================================================
+
+		The default domain type of a group may be modified only when
+
+		- The group has only one device.
+		- The device in the group is not bound to any device driver.
+		  So, the user must unbind the appropriate driver before
+		  changing the default domain type.
+
+		Unbinding a device driver takes away the driver's control over
+		the device, and doing so on a device that hosts the root file
+		system could have catastrophic effects (the user might need to
+		reboot the machine to get it back to a normal state). So, it's
+		expected that the user understands what they're doing.
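For reference, a minimal user-space sketch of the write path this ABI describes. The group number is a placeholder, and the device's driver is assumed to have been unbound beforehand (e.g. by writing the device name to the driver's sysfs "unbind" file); this is an illustration, not part of the patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/*
 * Illustrative only: change the default domain type of iommu group 4
 * to "identity", then read back the type now in effect. Requires
 * CAP_SYS_ADMIN and CAP_SYS_RAWIO, per iommu_group_store_type().
 */
int main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/iommu_groups/4/type", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Request the new default domain type */
	if (write(fd, "identity", strlen("identity")) < 0)
		perror("write");

	/* Read back the type the group ended up with */
	lseek(fd, 0, SEEK_SET);
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("group type: %s", buf);
	}

	close(fd);
	return 0;
}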
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
index 503160a7b9a0..3b63f2ae24db 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -28,8 +28,6 @@ properties:
           - enum:
               - qcom,msm8996-smmu-v2
               - qcom,msm8998-smmu-v2
-              - qcom,sc7180-smmu-v2
-              - qcom,sdm845-smmu-v2
           - const: qcom,smmu-v2
       - description: Qcom SoCs implementing "arm,mmu-500"
@@ -40,6 +38,13 @@ properties:
               - qcom,sm8150-smmu-500
               - qcom,sm8250-smmu-500
           - const: arm,mmu-500
+      - description: Qcom Adreno GPUs implementing "arm,smmu-v2"
+        items:
+          - enum:
+              - qcom,sc7180-smmu-v2
+              - qcom,sdm845-smmu-v2
+          - const: qcom,adreno-smmu
+          - const: qcom,smmu-v2
       - description: Marvell SoCs implementing "arm,mmu-500"
         items:
           - const: marvell,ap806-smmu-500
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index e634bbe60573..c9cafce51835 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -33,7 +33,7 @@
 
 #include "arm-smmu-v3.h"
 
-static bool disable_bypass = 1;
+static bool disable_bypass = true;
 module_param(disable_bypass, bool, 0444);
 MODULE_PARM_DESC(disable_bypass,
 	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index 88f17cc33023..136872e77195 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -12,7 +12,7 @@
 
 static int arm_smmu_gr0_ns(int offset)
 {
-	switch(offset) {
+	switch (offset) {
 	case ARM_SMMU_GR0_sCR0:
 	case ARM_SMMU_GR0_sACR:
 	case ARM_SMMU_GR0_sGFSR:
@@ -91,15 +91,12 @@ static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smm
 {
 	struct cavium_smmu *cs;
 
-	cs = devm_kzalloc(smmu->dev, sizeof(*cs), GFP_KERNEL);
+	cs = devm_krealloc(smmu->dev, smmu, sizeof(*cs), GFP_KERNEL);
 	if (!cs)
 		return ERR_PTR(-ENOMEM);
 
-	cs->smmu = *smmu;
 	cs->smmu.impl = &cavium_impl;
 
-	devm_kfree(smmu->dev, smmu);
-
 	return &cs->smmu;
 }
@@ -217,11 +214,7 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
 	if (of_device_is_compatible(np, "nvidia,tegra194-smmu"))
 		return nvidia_smmu_impl_init(smmu);
 
-	if (of_device_is_compatible(np, "qcom,sdm845-smmu-500") ||
-	    of_device_is_compatible(np, "qcom,sc7180-smmu-500") ||
-	    of_device_is_compatible(np, "qcom,sm8150-smmu-500") ||
-	    of_device_is_compatible(np, "qcom,sm8250-smmu-500"))
-		return qcom_smmu_impl_init(smmu);
+	smmu = qcom_smmu_impl_init(smmu);
 
 	if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
 		smmu->impl = &mrvl_mmu500_impl;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
index 31368057e9be..29117444e5a0 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -242,18 +242,10 @@ struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
 	struct nvidia_smmu *nvidia_smmu;
 	struct platform_device *pdev = to_platform_device(dev);
 
-	nvidia_smmu = devm_kzalloc(dev, sizeof(*nvidia_smmu), GFP_KERNEL);
+	nvidia_smmu = devm_krealloc(dev, smmu, sizeof(*nvidia_smmu), GFP_KERNEL);
 	if (!nvidia_smmu)
 		return ERR_PTR(-ENOMEM);
 
-	/*
-	 * Copy the data from struct arm_smmu_device *smmu allocated in
-	 * arm-smmu.c. The smmu from struct nvidia_smmu replaces the smmu
-	 * pointer used in arm-smmu.c once this function returns.
-	 * This is necessary to derive nvidia_smmu from smmu pointer passed
-	 * through arm_smmu_impl function calls subsequently.
-	 */
-	nvidia_smmu->smmu = *smmu;
 	/* Instance 0 is ioremapped by arm-smmu.c. */
 	nvidia_smmu->bases[0] = smmu->base;
@@ -267,12 +259,5 @@ struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
 
 	nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
 
-	/*
-	 * Free the struct arm_smmu_device *smmu allocated in arm-smmu.c.
-	 * Once this function returns, arm-smmu.c would use arm_smmu_device
-	 * allocated as part of struct nvidia_smmu.
-	 */
-	devm_kfree(dev, smmu);
-
 	return &nvidia_smmu->smmu;
 }
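The cavium, nvidia, and qcom conversions above all follow the same pattern: rather than allocating a wrapper, copying the base struct into it, and freeing the original, the devres-managed base allocation is grown in place with devm_krealloc(), which preserves its contents. A minimal sketch of the pattern, with illustrative names (example_smmu and example_impl are not from this patch):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

#include "arm-smmu.h"

struct example_smmu {
	struct arm_smmu_device smmu;	/* must remain the first member */
	bool some_quirk;		/* subclass-private state */
};

static const struct arm_smmu_impl example_impl = {
	/* subclass callbacks go here */
};

static struct arm_smmu_device *example_smmu_impl_init(struct arm_smmu_device *smmu)
{
	struct example_smmu *es;

	/*
	 * Grow the devres-managed allocation in place. The old contents of
	 * *smmu are preserved, so no manual copy and no devm_kfree() are
	 * needed; &es->smmu is the (possibly relocated) base structure,
	 * which the caller adopts as its new smmu pointer.
	 */
	es = devm_krealloc(smmu->dev, smmu, sizeof(*es), GFP_KERNEL);
	if (!es)
		return ERR_PTR(-ENOMEM);

	es->smmu.impl = &example_impl;	/* hook in the subclass callbacks */

	return &es->smmu;
}

Callbacks can later recover the wrapper with container_of(smmu, struct example_smmu, smmu), exactly as to_qcom_smmu() does in the qcom diff below.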
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index be4318044f96..add1859b2899 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -3,6 +3,7 @@
  * Copyright (c) 2019, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/adreno-smmu-priv.h>
 #include <linux/of_device.h>
 #include <linux/qcom_scm.h>
 
@@ -10,8 +11,155 @@
 
 struct qcom_smmu {
 	struct arm_smmu_device smmu;
+	bool bypass_quirk;
+	u8 bypass_cbndx;
 };
 
+static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
+{
+	return container_of(smmu, struct qcom_smmu, smmu);
+}
+
+static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
+		u32 reg)
+{
+	/*
+	 * On the GPU device we want to process subsequent transactions after a
+	 * fault to keep the GPU from hanging
+	 */
+	reg |= ARM_SMMU_SCTLR_HUPCF;
+
+	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
+}
+
+#define QCOM_ADRENO_SMMU_GPU_SID 0
+
+static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
+{
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+	int i;
+
+	/*
+	 * The GPU will always use SID 0 so that is a handy way to uniquely
+	 * identify it and configure it for per-instance pagetables
+	 */
+	for (i = 0; i < fwspec->num_ids; i++) {
+		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
+
+		if (sid == QCOM_ADRENO_SMMU_GPU_SID)
+			return true;
+	}
+
+	return false;
+}
+
+static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg(
+		const void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = (void *)cookie;
+	struct io_pgtable *pgtable =
+		io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
+	return &pgtable->cfg;
+}
+
+/*
+ * Local implementation to configure TTBR0 with the specified pagetable config.
+ * The GPU driver will call this to enable TTBR0 when per-instance pagetables
+ * are active
+ */
+static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
+		const struct io_pgtable_cfg *pgtbl_cfg)
+{
+	struct arm_smmu_domain *smmu_domain = (void *)cookie;
+	struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
+
+	/* The domain must have split pagetables already enabled */
+	if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
+		return -EINVAL;
+
+	/* If the pagetable config is NULL, disable TTBR0 */
+	if (!pgtbl_cfg) {
+		/* Do nothing if it is already disabled */
+		if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
+			return -EINVAL;
+
+		/* Set TCR to the original configuration */
+		cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
+		cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
+	} else {
+		u32 tcr = cb->tcr[0];
+
+		/* Don't call this again if TTBR0 is already enabled */
+		if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
+			return -EINVAL;
+
+		tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
+		tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);
+
+		cb->tcr[0] = tcr;
+		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+		cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
+	}
+
+	arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);
+
+	return 0;
+}
+
+static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
+		struct arm_smmu_device *smmu,
+		struct device *dev, int start)
+{
+	int count;
+
+	/*
+	 * Assign context bank 0 to the GPU device so the GPU hardware can
+	 * switch pagetables
+	 */
+	if (qcom_adreno_smmu_is_gpu_device(dev)) {
+		start = 0;
+		count = 1;
+	} else {
+		start = 1;
+		count = smmu->num_context_banks;
+	}
+
+	return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
+}
+
+static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
+{
+	struct adreno_smmu_priv *priv;
+
+	/* Only enable split pagetables for the GPU device (SID 0) */
+	if (!qcom_adreno_smmu_is_gpu_device(dev))
+		return 0;
+
+	/*
+	 * All targets that use the qcom,adreno-smmu compatible string *should*
+	 * be AARCH64 stage 1 but double check because the arm-smmu code assumes
+	 * that is the case when the TTBR1 quirk is enabled
+	 */
+	if ((smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
+	    (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
+		pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;
+
+	/* Initialize the private interface with the GPU */
+	priv = dev_get_drvdata(dev);
+	priv->cookie = smmu_domain;
+	priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
+	priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
+
+	return 0;
+}
+
 static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
 	{ .compatible = "qcom,adreno" },
 	{ .compatible = "qcom,mdp4" },
@@ -23,6 +171,87 @@
 	{ }
 };
 
+static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
+{
+	unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
+	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+	u32 reg;
+	u32 smr;
+	int i;
+
+	/*
+	 * With some firmware versions writes to S2CR of type FAULT are
+	 * ignored, and writing BYPASS will end up written as FAULT in the
+	 * register. Perform a write to S2CR to detect if this is the case
+	 * and, if so, reserve a context bank to emulate bypass streams.
+	 */
+	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
+	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
+	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
+	arm_smmu_gr0_write(smmu, last_s2cr, reg);
+	reg = arm_smmu_gr0_read(smmu, last_s2cr);
+	if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
+		qsmmu->bypass_quirk = true;
+		qsmmu->bypass_cbndx = smmu->num_context_banks - 1;
+
+		set_bit(qsmmu->bypass_cbndx, smmu->context_map);
+
+		reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
+		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
+	}
+
+	for (i = 0; i < smmu->num_mapping_groups; i++) {
+		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+
+		if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
+			smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
+			smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
+			smmu->smrs[i].valid = true;
+
+			smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
+			smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
+			smmu->s2crs[i].cbndx = 0xff;
+		}
+	}
+
+	return 0;
+}
+
+static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
+{
+	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
+	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+	u32 cbndx = s2cr->cbndx;
+	u32 type = s2cr->type;
+	u32 reg;
+
+	if (qsmmu->bypass_quirk) {
+		if (type == S2CR_TYPE_BYPASS) {
+			/*
+			 * Firmware with quirky S2CR handling will substitute
+			 * BYPASS writes with FAULT, so point the stream to the
+			 * reserved context bank and ask for translation on the
+			 * stream
+			 */
+			type = S2CR_TYPE_TRANS;
+			cbndx = qsmmu->bypass_cbndx;
+		} else if (type == S2CR_TYPE_FAULT) {
+			/*
+			 * Firmware with quirky S2CR handling will ignore FAULT
+			 * writes, so trick it to write FAULT by asking for a
+			 * BYPASS.
+			 */
+			type = S2CR_TYPE_BYPASS;
+			cbndx = 0xff;
+		}
+	}
+
+	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
+	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
+	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
+	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
+}
+
 static int qcom_smmu_def_domain_type(struct device *dev)
 {
 	const struct of_device_id *match =
@@ -61,22 +290,51 @@
 }
 
 static const struct arm_smmu_impl qcom_smmu_impl = {
+	.cfg_probe = qcom_smmu_cfg_probe,
 	.def_domain_type = qcom_smmu_def_domain_type,
 	.reset = qcom_smmu500_reset,
+	.write_s2cr = qcom_smmu_write_s2cr,
 };
 
-struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
+static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
+	.init_context = qcom_adreno_smmu_init_context,
+	.def_domain_type = qcom_smmu_def_domain_type,
+	.reset = qcom_smmu500_reset,
+	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
+	.write_sctlr = qcom_adreno_smmu_write_sctlr,
+};
+
+static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
+		const struct arm_smmu_impl *impl)
 {
 	struct qcom_smmu *qsmmu;
 
-	qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
+	qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
 	if (!qsmmu)
 		return ERR_PTR(-ENOMEM);
 
-	qsmmu->smmu = *smmu;
-
-	qsmmu->smmu.impl = &qcom_smmu_impl;
-	devm_kfree(smmu->dev, smmu);
+	qsmmu->smmu.impl = impl;
 
 	return &qsmmu->smmu;
 }
+
+static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
+	{ .compatible = "qcom,sc7180-smmu-500" },
+	{ .compatible = "qcom,sdm845-smmu-500" },
+	{ .compatible = "qcom,sm8150-smmu-500" },
+	{ .compatible = "qcom,sm8250-smmu-500" },
+	{ }
+};
+struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+	const struct device_node *np = smmu->dev->of_node;
+
+	if (of_match_node(qcom_smmu_impl_of_match, np))
+		return qcom_smmu_create(smmu, &qcom_smmu_impl);
+
+	if (of_device_is_compatible(np, "qcom,adreno-smmu"))
+		return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl);
+
+	return smmu;
+}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index dad7fa86fbd4..d8979bb71fc0 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -617,7 +617,10 @@ void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
 	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 		reg |= ARM_SMMU_SCTLR_E;
 
-	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
+	if (smmu->impl && smmu->impl->write_sctlr)
+		smmu->impl->write_sctlr(smmu, idx, reg);
+	else
+		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
 }
 
 static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
@@ -783,8 +786,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		goto out_clear_smmu;
 	}
 
-	if (smmu_domain->non_strict)
-		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+	if (smmu_domain->pgtbl_cfg.quirks)
+		pgtbl_cfg.quirks |= smmu_domain->pgtbl_cfg.quirks;
 
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
 	if (!pgtbl_ops) {
@@ -929,9 +932,16 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
 {
 	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
-	u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
-		  FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
-		  FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
+	u32 reg;
+
+	if (smmu->impl && smmu->impl->write_s2cr) {
+		smmu->impl->write_s2cr(smmu, idx);
+		return;
+	}
+
+	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
+	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
+	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
 
 	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
 	    smmu->smrs[idx].valid)
@@ -1501,15 +1511,24 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
 		case DOMAIN_ATTR_NESTING:
 			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
 			return 0;
+		case DOMAIN_ATTR_IO_PGTABLE_CFG: {
+			struct io_pgtable_domain_attr *pgtbl_cfg = data;
+			*pgtbl_cfg = smmu_domain->pgtbl_cfg;
+
+			return 0;
+		}
 		default:
 			return -ENODEV;
 		}
 		break;
 	case IOMMU_DOMAIN_DMA:
 		switch (attr) {
-		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-			*(int *)data = smmu_domain->non_strict;
+		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: {
+			bool non_strict = smmu_domain->pgtbl_cfg.quirks &
+					  IO_PGTABLE_QUIRK_NON_STRICT;
+			*(int *)data = non_strict;
 			return 0;
+		}
 		default:
 			return -ENODEV;
 		}
@@ -1541,6 +1560,17 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
 			else
 				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
 			break;
+		case DOMAIN_ATTR_IO_PGTABLE_CFG: {
+			struct io_pgtable_domain_attr *pgtbl_cfg = data;
+
+			if (smmu_domain->smmu) {
+				ret = -EPERM;
+				goto out_unlock;
+			}
+
+			smmu_domain->pgtbl_cfg = *pgtbl_cfg;
+			break;
+		}
 		default:
 			ret = -ENODEV;
 		}
@@ -1548,7 +1578,10 @@
 	case IOMMU_DOMAIN_DMA:
 		switch (attr) {
 		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-			smmu_domain->non_strict = *(int *)data;
+			if (*(int *)data)
+				smmu_domain->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+			else
+				smmu_domain->pgtbl_cfg.quirks &= ~IO_PGTABLE_QUIRK_NON_STRICT;
 			break;
 		default:
 			ret = -ENODEV;
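qcom_adreno_smmu_init_context() above publishes cookie, get_ttbr1_cfg, and set_ttbr0_cfg to the GPU driver through struct adreno_smmu_priv (the new include/linux/adreno-smmu-priv.h), stashed in the GPU device's drvdata. A hedged sketch of the consumer side; the function below is illustrative, not the actual drm/msm code:

#include <linux/adreno-smmu-priv.h>
#include <linux/device.h>
#include <linux/io-pgtable.h>

/*
 * Illustrative only: switch the GPU's context bank to a per-instance
 * TTBR0 pagetable. Allocating the pagetable and filling in its physical
 * address (cfg.arm_lpae_s1_cfg.ttbr) is assumed to happen where marked.
 */
static int example_enable_per_instance_pgtable(struct device *gpu_dev)
{
	struct adreno_smmu_priv *priv = dev_get_drvdata(gpu_dev);
	struct io_pgtable_cfg cfg;

	/* Clone the TTBR1 config so the new table uses a matching format */
	cfg = *priv->get_ttbr1_cfg(priv->cookie);

	/* ... allocate a pagetable and set cfg.arm_lpae_s1_cfg.ttbr ... */

	/* Program TTBR0; passing NULL instead would disable it again */
	return priv->set_ttbr0_cfg(priv->cookie, &cfg);
}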
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 1a746476927c..d2a2d1bc58ba 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -144,6 +144,7 @@ enum arm_smmu_cbar_type {
 #define ARM_SMMU_CB_SCTLR		0x0
 #define ARM_SMMU_SCTLR_S1_ASIDPNE	BIT(12)
 #define ARM_SMMU_SCTLR_CFCFG		BIT(7)
+#define ARM_SMMU_SCTLR_HUPCF		BIT(8)
 #define ARM_SMMU_SCTLR_CFIE		BIT(6)
 #define ARM_SMMU_SCTLR_CFRE		BIT(5)
 #define ARM_SMMU_SCTLR_E		BIT(4)
@@ -363,10 +364,10 @@ enum arm_smmu_domain_stage {
 struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
 	struct io_pgtable_ops		*pgtbl_ops;
+	struct io_pgtable_domain_attr	pgtbl_cfg;
 	const struct iommu_flush_ops	*flush_ops;
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
-	bool				non_strict;
 	struct mutex			init_mutex; /* Protects smmu pointer */
 	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
 	struct iommu_domain		domain;
@@ -436,6 +437,8 @@ struct arm_smmu_impl {
 	int (*alloc_context_bank)(struct arm_smmu_domain *smmu_domain,
 				  struct arm_smmu_device *smmu,
 				  struct device *dev, int start);
+	void (*write_s2cr)(struct arm_smmu_device *smmu, int idx);
+	void (*write_sctlr)(struct arm_smmu_device *smmu, int idx, u32 reg);
 };
 
 #define INVALID_SMENDX			-1
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 1b1ca63e6bbe..83674e32e58b 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2916,13 +2916,6 @@ static int device_def_domain_type(struct device *dev)
 	if (dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(dev);
 
-		/*
-		 * Prevent any device marked as untrusted from getting
-		 * placed into the statically identity mapping domain.
-		 */
-		if (pdev->untrusted)
-			return IOMMU_DOMAIN_DMA;
-
 		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
 			return IOMMU_DOMAIN_IDENTITY;
 
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index a7a9bc08dcd1..7c9ea9d7874a 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -761,7 +761,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 
 	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
 			    IO_PGTABLE_QUIRK_NON_STRICT |
-			    IO_PGTABLE_QUIRK_ARM_TTBR1))
+			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
+			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
 		return NULL;
 
 	data = arm_lpae_alloc_pgtable(cfg);
@@ -773,10 +774,15 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 		tcr->sh = ARM_LPAE_TCR_SH_IS;
 		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
 		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
+		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
+			goto out_free_data;
 	} else {
 		tcr->sh = ARM_LPAE_TCR_SH_OS;
 		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
-		tcr->orgn = ARM_LPAE_TCR_RGN_NC;
+		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
+			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
+		else
+			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
 	}
 
 	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
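IO_PGTABLE_QUIRK_ARM_OUTER_WBWA only overrides the outer cacheability for a non-coherent walker (a coherent walk with the quirk set is rejected via out_free_data above), and arm_smmu_domain_set_attr() accepts DOMAIN_ATTR_IO_PGTABLE_CFG only while smmu_domain->smmu is still NULL, i.e. before the domain is attached. A sketch of a client driver opting in under those assumptions (the helper name is illustrative):

#include <linux/io-pgtable.h>
#include <linux/iommu.h>

/*
 * Illustrative only: request write-back/write-allocate outer-cacheable
 * pagetable walks for a domain. Must run before iommu_attach_device(),
 * since arm_smmu_domain_set_attr() returns -EPERM once the domain is live.
 */
static int example_set_outer_wbwa(struct iommu_domain *domain,
				  struct device *dev)
{
	struct io_pgtable_domain_attr pgtbl_cfg = {
		.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA,
	};
	int ret;

	ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_IO_PGTABLE_CFG,
				    &pgtbl_cfg);
	if (ret)
		return ret;

	return iommu_attach_device(domain, dev);
}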
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b53446bb8c6b..cc06dcadff8f 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -93,6 +93,8 @@ static void __iommu_detach_group(struct iommu_domain *domain,
 static int iommu_create_device_direct_mappings(struct iommu_group *group,
 					       struct device *dev);
 static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
+static ssize_t iommu_group_store_type(struct iommu_group *group,
+				      const char *buf, size_t count);
 
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
 struct iommu_group_attribute iommu_group_attr_##_name =	\
@@ -499,6 +501,7 @@ static ssize_t iommu_group_show_type(struct iommu_group *group,
 {
 	char *type = "unknown\n";
 
+	mutex_lock(&group->mutex);
 	if (group->default_domain) {
 		switch (group->default_domain->type) {
 		case IOMMU_DOMAIN_BLOCKED:
@@ -515,6 +518,7 @@ static ssize_t iommu_group_show_type(struct iommu_group *group,
 			break;
 		}
 	}
+	mutex_unlock(&group->mutex);
 	strcpy(buf, type);
 
 	return strlen(type);
@@ -525,7 +529,8 @@ static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
 static IOMMU_GROUP_ATTR(reserved_regions, 0444,
 			iommu_group_show_resv_regions, NULL);
 
-static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);
+static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
+			iommu_group_store_type);
 
 static void iommu_group_release(struct kobject *kobj)
 {
@@ -1460,12 +1465,14 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_group);
 static int iommu_get_def_domain_type(struct device *dev)
 {
 	const struct iommu_ops *ops = dev->bus->iommu_ops;
-	unsigned int type = 0;
+
+	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
+		return IOMMU_DOMAIN_DMA;
 
 	if (ops->def_domain_type)
-		type = ops->def_domain_type(dev);
+		return ops->def_domain_type(dev);
 
-	return (type == 0) ? iommu_def_domain_type : type;
+	return 0;
 }
 
 static int iommu_group_alloc_default_domain(struct bus_type *bus,
@@ -1507,7 +1514,7 @@ static int iommu_alloc_default_domain(struct iommu_group *group,
 	if (group->default_domain)
 		return 0;
 
-	type = iommu_get_def_domain_type(dev);
+	type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;
 
 	return iommu_group_alloc_default_domain(dev->bus, group, type);
 }
@@ -1645,12 +1652,8 @@ struct __group_domain_type {
 
 static int probe_get_default_domain_type(struct device *dev, void *data)
 {
-	const struct iommu_ops *ops = dev->bus->iommu_ops;
 	struct __group_domain_type *gtype = data;
-	unsigned int type = 0;
-
-	if (ops->def_domain_type)
-		type = ops->def_domain_type(dev);
+	unsigned int type = iommu_get_def_domain_type(dev);
 
 	if (type) {
 		if (gtype->type && gtype->type != type) {
@@ -3029,3 +3032,228 @@ u32 iommu_sva_get_pasid(struct iommu_sva *handle)
 	return ops->sva_get_pasid(handle);
 }
 EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
+
+/*
+ * Changes the default domain of an iommu group that has *only* one device
+ *
+ * @group: The group for which the default domain should be changed
+ * @prev_dev: The device in the group (this is used to make sure that the
+ *	      device hasn't changed after the caller has called this function)
+ * @type: The type of the new default domain that gets associated with the
+ *	  group
+ *
+ * Returns 0 on success and error code on failure
+ *
+ * Note:
+ * 1. Presently, this function is called only when the user requests to change
+ *    the group's default domain type through
+ *    /sys/kernel/iommu_groups/<grp_id>/type
+ *    Please take a closer look if intended to use for other purposes.
+ */
+static int iommu_change_dev_def_domain(struct iommu_group *group,
+				       struct device *prev_dev, int type)
+{
+	struct iommu_domain *prev_dom;
+	struct group_device *grp_dev;
+	int ret, dev_def_dom;
+	struct device *dev;
+
+	if (!group)
+		return -EINVAL;
+
+	mutex_lock(&group->mutex);
+
+	if (group->default_domain != group->domain) {
+		dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/*
+	 * The iommu group wasn't locked while acquiring the device lock in
+	 * iommu_group_store_type(). So, make sure that the device count hasn't
+	 * changed while acquiring the device lock.
+	 *
+	 * Changing the default domain of an iommu group with two or more
+	 * devices isn't supported because there could be a potential deadlock.
+	 * Consider the following scenario. T1 is trying to acquire the device
+	 * locks of all the devices in the group and, before it could acquire
+	 * all of them, there could be another thread T2 (from a different
+	 * sub-system and use case) that has already acquired some of the
+	 * device locks and might be waiting for T1 to release the other
+	 * device locks.
+	 */
+	if (iommu_group_device_count(group) != 1) {
+		dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Since group has only one device */
+	grp_dev = list_first_entry(&group->devices, struct group_device, list);
+	dev = grp_dev->dev;
+
+	if (prev_dev != dev) {
+		dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	prev_dom = group->default_domain;
+	if (!prev_dom) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dev_def_dom = iommu_get_def_domain_type(dev);
+	if (!type) {
+		/*
+		 * If the user hasn't requested any specific type of domain and
+		 * if the device supports both the domains, then default to the
+		 * domain the device was booted with
+		 */
+		type = dev_def_dom ? : iommu_def_domain_type;
+	} else if (dev_def_dom && type != dev_def_dom) {
+		dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n",
+				    iommu_domain_type_str(type));
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Switch to a new domain only if the requested domain type is
+	 * different from the existing default domain type
+	 */
+	if (prev_dom->type == type) {
+		ret = 0;
+		goto out;
+	}
+
+	/* Sets group->default_domain to the newly allocated domain */
+	ret = iommu_group_alloc_default_domain(dev->bus, group, type);
+	if (ret)
+		goto out;
+
+	ret = iommu_create_device_direct_mappings(group, dev);
+	if (ret)
+		goto free_new_domain;
+
+	ret = __iommu_attach_device(group->default_domain, dev);
+	if (ret)
+		goto free_new_domain;
+
+	group->domain = group->default_domain;
+
+	/*
+	 * Release the mutex here because the ops->probe_finalize() call-back
+	 * of some vendor IOMMU drivers calls arm_iommu_attach_device(), which
+	 * in turn might call back into IOMMU core code, where it tries to
+	 * take group->mutex, resulting in a deadlock.
+	 */
+	mutex_unlock(&group->mutex);
+
+	/* Make sure dma_ops is appropriately set */
+	iommu_group_do_probe_finalize(dev, group->default_domain);
+	iommu_domain_free(prev_dom);
+	return 0;
+
+free_new_domain:
+	iommu_domain_free(group->default_domain);
+	group->default_domain = prev_dom;
+	group->domain = prev_dom;
+
+out:
+	mutex_unlock(&group->mutex);
+
+	return ret;
+}
+
+/*
+ * Changing the default domain through sysfs requires the user to unbind the
+ * drivers from the devices in the iommu group. Return failure if this
+ * precondition is not met.
+ *
+ * We need to consider the race between this and the device release path.
+ * device_lock(dev) is used here to guarantee that the device release path
+ * will not be entered at the same time.
+ */
+static ssize_t iommu_group_store_type(struct iommu_group *group,
+				      const char *buf, size_t count)
+{
+	struct group_device *grp_dev;
+	struct device *dev;
+	int ret, req_type;
+
+	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+		return -EACCES;
+
+	if (WARN_ON(!group))
+		return -EINVAL;
+
+	if (sysfs_streq(buf, "identity"))
+		req_type = IOMMU_DOMAIN_IDENTITY;
+	else if (sysfs_streq(buf, "DMA"))
+		req_type = IOMMU_DOMAIN_DMA;
+	else if (sysfs_streq(buf, "auto"))
+		req_type = 0;
+	else
+		return -EINVAL;
+
+	/*
+	 * Lock/Unlock the group mutex here before the device lock to
+	 * 1. Make sure that the iommu group has only one device (this is a
+	 *    prerequisite for step 2)
+	 * 2. Get struct *dev, which is needed to lock the device
+	 */
+	mutex_lock(&group->mutex);
+	if (iommu_group_device_count(group) != 1) {
+		mutex_unlock(&group->mutex);
+		pr_err_ratelimited("Cannot change default domain: Group has more than one device\n");
+		return -EINVAL;
+	}
+
+	/* Since group has only one device */
+	grp_dev = list_first_entry(&group->devices, struct group_device, list);
+	dev = grp_dev->dev;
+	get_device(dev);
+
+	/*
+	 * Don't hold the group mutex because taking the group mutex first and
+	 * then the device lock could potentially cause a deadlock as below.
+	 * Assume two threads, T1 and T2. T1 is trying to change the default
+	 * domain of an iommu group and T2 is trying to hot unplug a device or
+	 * release [1] a VF of a PCIe device which is in the same iommu group.
+	 * T1 takes the group mutex and, before it could take the device lock,
+	 * assume T2 has taken the device lock and is yet to take the group
+	 * mutex. Now, both the threads will be waiting for the other thread
+	 * to release the lock. Hence the lock order below is used.
+	 *	device_lock(dev);
+	 *		mutex_lock(&group->mutex);
+	 *			iommu_change_dev_def_domain();
+	 *		mutex_unlock(&group->mutex);
+	 *	device_unlock(dev);
+	 *
+	 * [1] Typical device release path
+	 * device_lock() from device/driver core code
+	 *  -> bus_notifier()
+	 *   -> iommu_bus_notifier()
+	 *    -> iommu_release_device()
+	 *     -> ops->release_device() vendor driver calls back iommu core code
+	 *      -> mutex_lock() from iommu core code
+	 */
+	mutex_unlock(&group->mutex);
+
+	/* Check if the device in the group still has a driver bound to it */
+	device_lock(dev);
+	if (device_is_bound(dev)) {
+		pr_err_ratelimited("Device is still bound to driver\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = iommu_change_dev_def_domain(group, dev, req_type);
+	ret = ret ?: count;
+
+out:
+	device_unlock(dev);
+	put_device(dev);
+
+	return ret;
+}
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 4cde111e425b..fb4d5a763e0c 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -86,6 +86,9 @@ struct io_pgtable_cfg {
 	 *
 	 * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
 	 *	for use in the upper half of a split address space.
+	 *
+	 * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
+	 *	attributes set in the TCR for a non-coherent page-table walker.
 	 */
 	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
 	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
@@ -93,6 +96,7 @@ struct io_pgtable_cfg {
 	#define IO_PGTABLE_QUIRK_ARM_MTK_EXT	BIT(3)
 	#define IO_PGTABLE_QUIRK_NON_STRICT	BIT(4)
 	#define IO_PGTABLE_QUIRK_ARM_TTBR1	BIT(5)
+	#define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA	BIT(6)
 	unsigned long quirks;
 	unsigned long pgsize_bitmap;
 	unsigned int ias;
@@ -208,6 +212,10 @@ struct io_pgtable {
 
 #define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
 
+struct io_pgtable_domain_attr {
+	unsigned long quirks;
+};
+
 static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
 {
 	iop->cfg.tlb->tlb_flush_all(iop->cookie);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index b95a6f8db6ff..ffaa389ea128 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -118,6 +118,7 @@ enum iommu_attr {
 	DOMAIN_ATTR_FSL_PAMUV1,
 	DOMAIN_ATTR_NESTING,	/* two stages of translation */
 	DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
+	DOMAIN_ATTR_IO_PGTABLE_CFG,
 	DOMAIN_ATTR_MAX,
 };