From 7e0357bef402875425de0296800c34c41842ba82 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 18 Apr 2024 12:07:27 +0800 Subject: drm/amdgpu: remove unused MCA driver codes - remove unused callback functions. - make part of mca functions static and refine the function order. Signed-off-by: Yang Wang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 199 +++++++++++++------------------- 1 file changed, 82 insertions(+), 117 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index 0734490347db..67c208861994 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -153,7 +153,7 @@ int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev) return 0; } -void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set) +static void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set) { if (!mca_set) return; @@ -162,7 +162,7 @@ void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set) INIT_LIST_HEAD(&mca_set->list); } -int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry) +static int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry) { struct mca_bank_node *node; @@ -183,7 +183,7 @@ int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_ return 0; } -void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set) +static void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set) { struct mca_bank_node *node, *tmp; @@ -228,6 +228,84 @@ static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, st idx, entry->regs[MCA_REG_IDX_SYND]); } +static int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count) +{ + const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + + if (!count) + return -EINVAL; + + if (mca_funcs && mca_funcs->mca_get_valid_mca_count) + return mca_funcs->mca_get_valid_mca_count(adev, type, count); + + return -EOPNOTSUPP; +} + +static int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, + int idx, struct mca_bank_entry *entry) +{ + const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + int count; + + if (!mca_funcs || !mca_funcs->mca_get_mca_entry) + return -EOPNOTSUPP; + + switch (type) { + case AMDGPU_MCA_ERROR_TYPE_UE: + count = mca_funcs->max_ue_count; + break; + case AMDGPU_MCA_ERROR_TYPE_CE: + count = mca_funcs->max_ce_count; + break; + default: + return -EINVAL; + } + + if (idx >= count) + return -EINVAL; + + return mca_funcs->mca_get_mca_entry(adev, type, idx, entry); +} + +static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) +{ + struct mca_bank_entry entry; + uint32_t count = 0, i; + int ret; + + if (!mca_set) + return -EINVAL; + + ret = amdgpu_mca_smu_get_valid_mca_count(adev, type, &count); + if (ret) + return ret; + + for (i = 0; i < count; i++) { + memset(&entry, 0, sizeof(entry)); + ret = amdgpu_mca_smu_get_mca_entry(adev, type, i, &entry); + if (ret) + return ret; + + amdgpu_mca_bank_set_add_entry(mca_set, &entry); + } + + return 0; +} + +static int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) +{ + const struct 
amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + + if (!count || !entry) + return -EINVAL; + + if (!mca_funcs || !mca_funcs->mca_parse_mca_error_count) + return -EOPNOTSUPP; + + return mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, count); +} + int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data, struct ras_query_context *qctx) { @@ -241,7 +319,7 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo amdgpu_mca_bank_set_init(&mca_set); - ret = amdgpu_mca_smu_get_mca_set(adev, blk, type, &mca_set); + ret = amdgpu_mca_smu_get_mca_set(adev, type, &mca_set); if (ret) goto out_mca_release; @@ -286,119 +364,6 @@ out_mca_release: return ret; } - -int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count) -{ - const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; - - if (!count) - return -EINVAL; - - if (mca_funcs && mca_funcs->mca_get_valid_mca_count) - return mca_funcs->mca_get_valid_mca_count(adev, type, count); - - return -EOPNOTSUPP; -} - -int amdgpu_mca_smu_get_mca_set_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, - enum amdgpu_mca_error_type type, uint32_t *total) -{ - const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; - struct mca_bank_set mca_set; - struct mca_bank_node *node; - struct mca_bank_entry *entry; - uint32_t count; - int ret; - - if (!total) - return -EINVAL; - - if (!mca_funcs) - return -EOPNOTSUPP; - - if (!mca_funcs->mca_get_ras_mca_set || !mca_funcs->mca_get_valid_mca_count) - return -EOPNOTSUPP; - - amdgpu_mca_bank_set_init(&mca_set); - - ret = mca_funcs->mca_get_ras_mca_set(adev, blk, type, &mca_set); - if (ret) - goto err_mca_set_release; - - *total = 0; - list_for_each_entry(node, &mca_set.list, node) { - entry = &node->entry; - - count = 0; - ret = mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, &count); - if (ret) - goto err_mca_set_release; - - *total += count; - } - -err_mca_set_release: - amdgpu_mca_bank_set_release(&mca_set); - - return ret; -} - -int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, - enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) -{ - const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; - if (!count || !entry) - return -EINVAL; - - if (!mca_funcs || !mca_funcs->mca_parse_mca_error_count) - return -EOPNOTSUPP; - - - return mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, count); -} - -int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, - enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) -{ - const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; - - if (!mca_set) - return -EINVAL; - - if (!mca_funcs || !mca_funcs->mca_get_ras_mca_set) - return -EOPNOTSUPP; - - WARN_ON(!list_empty(&mca_set->list)); - - return mca_funcs->mca_get_ras_mca_set(adev, blk, type, mca_set); -} - -int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, - int idx, struct mca_bank_entry *entry) -{ - const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; - int count; - - if (!mca_funcs || !mca_funcs->mca_get_mca_entry) - return -EOPNOTSUPP; - - switch (type) { - case AMDGPU_MCA_ERROR_TYPE_UE: - count = mca_funcs->max_ue_count; - break; - case AMDGPU_MCA_ERROR_TYPE_CE: - count = 
mca_funcs->max_ce_count; - break; - default: - return -EINVAL; - } - - if (idx >= count) - return -EINVAL; - - return mca_funcs->mca_get_mca_entry(adev, type, idx, entry); -} - #if defined(CONFIG_DEBUG_FS) static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val) { -- cgit From 8fb20d9551368f0b1ef5b31fa0b4634f4be37157 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 18 Apr 2024 13:12:36 +0800 Subject: drm/amdgpu: add amdgpu MCA bank dispatch function support - Refine mca driver code. - Centralize mca bank dispatch code logic. Signed-off-by: Yang Wang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 97 +++++++++++++++++++-------------- 1 file changed, 55 insertions(+), 42 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index 67c208861994..859d594c02a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -267,7 +267,8 @@ static int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_ return mca_funcs->mca_get_mca_entry(adev, type, idx, entry); } -static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) +static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set, + struct ras_query_context *qctx) { struct mca_bank_entry entry; uint32_t count = 0, i; @@ -287,6 +288,8 @@ static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mc return ret; amdgpu_mca_bank_set_add_entry(mca_set, &entry); + + amdgpu_mca_smu_mca_bank_dump(adev, i, &entry, qctx); } return 0; @@ -306,36 +309,36 @@ static int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum return mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, count); } -int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, - struct ras_err_data *err_data, struct ras_query_context *qctx) +static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, + struct mca_bank_set *mca_set, struct ras_err_data *err_data) { + struct ras_err_addr err_addr; struct amdgpu_smuio_mcm_config_info mcm_info; - struct ras_err_addr err_addr = {0}; - struct mca_bank_set mca_set; struct mca_bank_node *node; struct mca_bank_entry *entry; uint32_t count; - int ret, i = 0; + int ret; - amdgpu_mca_bank_set_init(&mca_set); + if (!mca_set) + return -EINVAL; - ret = amdgpu_mca_smu_get_mca_set(adev, type, &mca_set); - if (ret) - goto out_mca_release; + if (!mca_set->nr_entries) + return 0; - list_for_each_entry(node, &mca_set.list, node) { + list_for_each_entry(node, &mca_set->list, node) { entry = &node->entry; - amdgpu_mca_smu_mca_bank_dump(adev, i++, entry, qctx); - count = 0; ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count); if (ret) - goto out_mca_release; + return ret; if (!count) continue; + memset(&mcm_info, 0, sizeof(mcm_info)); + memset(&err_addr, 0, sizeof(err_addr)); + mcm_info.socket_id = entry->info.socket_id; mcm_info.die_id = entry->info.aid; @@ -345,19 +348,36 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR]; } - if (type == AMDGPU_MCA_ERROR_TYPE_UE) + if (type == AMDGPU_MCA_ERROR_TYPE_UE) { 
amdgpu_ras_error_statistic_ue_count(err_data, - &mcm_info, &err_addr, (uint64_t)count); - else { + &mcm_info, &err_addr, (uint64_t)count); + } else { if (amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS])) amdgpu_ras_error_statistic_de_count(err_data, - &mcm_info, &err_addr, (uint64_t)count); + &mcm_info, &err_addr, (uint64_t)count); else amdgpu_ras_error_statistic_ce_count(err_data, - &mcm_info, &err_addr, (uint64_t)count); + &mcm_info, &err_addr, (uint64_t)count); } } + return 0; +} + +int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, + struct ras_err_data *err_data, struct ras_query_context *qctx) +{ + struct mca_bank_set mca_set; + int ret; + + amdgpu_mca_bank_set_init(&mca_set); + + ret = amdgpu_mca_smu_get_mca_set(adev, type, &mca_set, qctx); + if (ret) + goto out_mca_release; + + ret = amdgpu_mca_dispatch_mca_set(adev, blk, type, &mca_set, err_data); + out_mca_release: amdgpu_mca_bank_set_release(&mca_set); @@ -402,36 +422,29 @@ static void mca_dump_entry(struct seq_file *m, struct mca_bank_entry *entry) static int mca_dump_show(struct seq_file *m, enum amdgpu_mca_error_type type) { struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct mca_bank_entry *entry; - uint32_t count = 0; - int i, ret; + struct mca_bank_node *node; + struct mca_bank_set mca_set; + struct ras_query_context qctx; + int ret; - ret = amdgpu_mca_smu_get_valid_mca_count(adev, type, &count); + amdgpu_mca_bank_set_init(&mca_set); + + qctx.event_id = 0ULL; + ret = amdgpu_mca_smu_get_mca_set(adev, type, &mca_set, &qctx); if (ret) - return ret; + goto err_free_mca_set; seq_printf(m, "amdgpu smu %s valid mca count: %d\n", - type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE", count); - - if (!count) - return 0; - - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - for (i = 0; i < count; i++) { - memset(entry, 0, sizeof(*entry)); + type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE", mca_set.nr_entries); - ret = amdgpu_mca_smu_get_mca_entry(adev, type, i, entry); - if (ret) - goto err_free_entry; + if (!mca_set.nr_entries) + goto err_free_mca_set; - mca_dump_entry(m, entry); - } + list_for_each_entry(node, &mca_set.list, node) + mca_dump_entry(m, &node->entry); -err_free_entry: - kfree(entry); +err_free_mca_set: + amdgpu_mca_bank_set_release(&mca_set); return ret; } -- cgit From 76ad30f51aa0d1bd99f12658d4775a86df6e4282 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 18 Apr 2024 15:46:00 +0800 Subject: drm/amdgpu: add MCA smu cache support v1: because SMU CE valid mca bank will be cleared after reading, this patch adds mca cache at the driver level to ensure that the mca bank is not lost. v2: refine amdgpu_mca_init/fini/reset() function name. v3: add mca_cache.lock support only add CE bank to mca bank cache. 
Signed-off-by: Yang Wang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 95 ++++++++++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h | 19 +++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 9 ++++ 3 files changed, 116 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index 859d594c02a5..264f56fd4f66 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -183,6 +183,29 @@ static int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mc return 0; } +static int amdgpu_mca_bank_set_merge(struct mca_bank_set *mca_set, struct mca_bank_set *new) +{ + struct mca_bank_node *node; + + list_for_each_entry(node, &new->list, node) + amdgpu_mca_bank_set_add_entry(mca_set, &node->entry); + + return 0; +} + +static int amdgpu_mca_bank_set_remove_node(struct mca_bank_set *mca_set, struct mca_bank_node *node) +{ + if (!node) + return -EINVAL; + + list_del(&node->node); + kvfree(node); + + mca_set->nr_entries--; + + return 0; +} + static void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set) { struct mca_bank_node *node, *tmp; @@ -200,6 +223,41 @@ void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_m mca->mca_funcs = mca_funcs; } +int amdgpu_mca_init(struct amdgpu_device *adev) +{ + struct amdgpu_mca *mca = &adev->mca; + struct mca_bank_cache *mca_cache; + int i; + + for (i = 0; i < ARRAY_SIZE(mca->mca_caches); i++) { + mca_cache = &mca->mca_caches[i]; + mutex_init(&mca_cache->lock); + amdgpu_mca_bank_set_init(&mca_cache->mca_set); + } + + return 0; +} + +void amdgpu_mca_fini(struct amdgpu_device *adev) +{ + struct amdgpu_mca *mca = &adev->mca; + struct mca_bank_cache *mca_cache; + int i; + + for (i = 0; i < ARRAY_SIZE(mca->mca_caches); i++) { + mca_cache = &mca->mca_caches[i]; + amdgpu_mca_bank_set_release(&mca_cache->mca_set); + mutex_destroy(&mca_cache->lock); + } +} + +int amdgpu_mca_reset(struct amdgpu_device *adev) +{ + amdgpu_mca_fini(adev); + + return amdgpu_mca_init(adev); +} + int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) { const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; @@ -314,7 +372,7 @@ static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_r { struct ras_err_addr err_addr; struct amdgpu_smuio_mcm_config_info mcm_info; - struct mca_bank_node *node; + struct mca_bank_node *node, *tmp; struct mca_bank_entry *entry; uint32_t count; int ret; @@ -325,7 +383,7 @@ static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_r if (!mca_set->nr_entries) return 0; - list_for_each_entry(node, &mca_set->list, node) { + list_for_each_entry_safe(node, tmp, &mca_set->list, node) { entry = &node->entry; count = 0; @@ -359,15 +417,30 @@ static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_r amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, &err_addr, (uint64_t)count); } + + amdgpu_mca_bank_set_remove_node(mca_set, node); } return 0; } +static int amdgpu_mca_add_mca_set_to_cache(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *new) +{ + struct mca_bank_cache *mca_cache = &adev->mca.mca_caches[type]; + int ret; + + mutex_lock(&mca_cache->lock); + ret = amdgpu_mca_bank_set_merge(&mca_cache->mca_set, new); + mutex_unlock(&mca_cache->lock); + + return ret; +} + int 
amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data, struct ras_query_context *qctx) { struct mca_bank_set mca_set; + struct mca_bank_cache *mca_cache = &adev->mca.mca_caches[type]; int ret; amdgpu_mca_bank_set_init(&mca_set); @@ -377,6 +450,21 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo goto out_mca_release; ret = amdgpu_mca_dispatch_mca_set(adev, blk, type, &mca_set, err_data); + if (ret) + goto out_mca_release; + + /* add remain mca bank to mca cache */ + if (mca_set.nr_entries) { + ret = amdgpu_mca_add_mca_set_to_cache(adev, type, &mca_set); + if (ret) + goto out_mca_release; + } + + /* dispatch mca set again if mca cache has valid data */ + mutex_lock(&mca_cache->lock); + if (mca_cache->mca_set.nr_entries) + ret = amdgpu_mca_dispatch_mca_set(adev, blk, type, &mca_cache->mca_set, err_data); + mutex_unlock(&mca_cache->lock); out_mca_release: amdgpu_mca_bank_set_release(&mca_set); @@ -443,6 +531,9 @@ static int mca_dump_show(struct seq_file *m, enum amdgpu_mca_error_type type) list_for_each_entry(node, &mca_set.list, node) mca_dump_entry(m, &node->entry); + /* add mca bank to mca bank cache */ + ret = amdgpu_mca_add_mca_set_to_cache(adev, type, &mca_set); + err_free_mca_set: amdgpu_mca_bank_set_release(&mca_set); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h index 4d0a0f91c375..9b97cfa28e05 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h @@ -77,11 +77,22 @@ struct amdgpu_mca_ras { struct amdgpu_mca_ras_block *ras; }; +struct mca_bank_set { + int nr_entries; + struct list_head list; +}; + +struct mca_bank_cache { + struct mca_bank_set mca_set; + struct mutex lock; +}; + struct amdgpu_mca { struct amdgpu_mca_ras mp0; struct amdgpu_mca_ras mp1; struct amdgpu_mca_ras mpio; const struct amdgpu_mca_smu_funcs *mca_funcs; + struct mca_bank_cache mca_caches[AMDGPU_MCA_ERROR_TYPE_DE]; }; enum mca_reg_idx { @@ -113,11 +124,6 @@ struct mca_bank_node { struct list_head node; }; -struct mca_bank_set { - int nr_entries; - struct list_head list; -}; - struct amdgpu_mca_smu_funcs { int max_ue_count; int max_ce_count; @@ -149,6 +155,9 @@ int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev); int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev); void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs); +int amdgpu_mca_init(struct amdgpu_device *adev); +void amdgpu_mca_fini(struct amdgpu_device *adev); +int amdgpu_mca_reset(struct amdgpu_device *adev); int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable); int amdgpu_mca_smu_get_mca_set_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, uint32_t *total); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 1adc81a55734..0522533c9182 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -3629,6 +3629,13 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev) amdgpu_ras_set_aca_debug_mode(adev, false); } else { + if (amdgpu_in_reset(adev)) + r = amdgpu_mca_reset(adev); + else + r = amdgpu_mca_init(adev); + if (r) + return r; + amdgpu_ras_set_mca_debug_mode(adev, false); } @@ -3701,6 +3708,8 @@ int amdgpu_ras_fini(struct amdgpu_device *adev) if (amdgpu_aca_is_enabled(adev)) amdgpu_aca_fini(adev); + else + 
amdgpu_mca_fini(adev);

 	WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
-- cgit

From 5eccab32c15f1e5cf9651d865fb20012d3563c96 Mon Sep 17 00:00:00 2001
From: Yang Wang
Date: Tue, 23 Apr 2024 10:14:47 +0800
Subject: drm/amdgpu: avoid dumping mca bank log multiple times during ras ISR

Because the UE valid MCA count is only cleared after GPU reset, dump the
MCA log only the first time the MCA banks are fetched after a RAS
interrupt is received.

Signed-off-by: Yang Wang
Reviewed-by: Tao Zhou
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 27 +++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h | 1 +
 2 files changed, 28 insertions(+)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 264f56fd4f66..a111751b9781 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -229,6 +229,8 @@ int amdgpu_mca_init(struct amdgpu_device *adev)
 	struct mca_bank_cache *mca_cache;
 	int i;
+	atomic_set(&mca->ue_update_flag, 0);
+
 	for (i = 0; i < ARRAY_SIZE(mca->mca_caches); i++) {
 		mca_cache = &mca->mca_caches[i];
 		mutex_init(&mca_cache->lock);
@@ -244,6 +246,8 @@ void amdgpu_mca_fini(struct amdgpu_device *adev)
 	struct mca_bank_cache *mca_cache;
 	int i;
+	atomic_set(&mca->ue_update_flag, 0);
+
 	for (i = 0; i < ARRAY_SIZE(mca->mca_caches); i++) {
 		mca_cache = &mca->mca_caches[i];
 		amdgpu_mca_bank_set_release(&mca_cache->mca_set);
@@ -325,6 +329,26 @@ static int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_
 	return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
 }
+static bool amdgpu_mca_bank_should_update(struct amdgpu_device *adev, enum amdgpu_mca_error_type type)
+{
+	struct amdgpu_mca *mca = &adev->mca;
+	bool ret = true;
+
+	/*
+	 * Because the UE Valid MCA count will only be cleared after reset,
+	 * in order to avoid repeated counting of the error count,
+	 * the aca bank is only updated once during the gpu recovery stage.
+ */ + if (type == AMDGPU_MCA_ERROR_TYPE_UE) { + if (amdgpu_ras_intr_triggered()) + ret = atomic_cmpxchg(&mca->ue_update_flag, 0, 1) == 0; + else + atomic_set(&mca->ue_update_flag, 0); + } + + return ret; +} + static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set, struct ras_query_context *qctx) { @@ -335,6 +359,9 @@ static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mc if (!mca_set) return -EINVAL; + if (!amdgpu_mca_bank_should_update(adev, type)) + return 0; + ret = amdgpu_mca_smu_get_valid_mca_count(adev, type, &count); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h index 9b97cfa28e05..e80323ff90c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h @@ -93,6 +93,7 @@ struct amdgpu_mca { struct amdgpu_mca_ras mpio; const struct amdgpu_mca_smu_funcs *mca_funcs; struct mca_bank_cache mca_caches[AMDGPU_MCA_ERROR_TYPE_DE]; + atomic_t ue_update_flag; }; enum mca_reg_idx { -- cgit From a6bcffa596770b0c54b3ddccbc115bdab4df08e9 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 1 May 2024 00:12:34 +0800 Subject: drm/amdgpu: Add smu v13_0_14 ip block Add smu v13_0_14 ip block support Signed-off-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 4 +++- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 4 +++- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 2 ++ drivers/gpu/drm/amd/amdgpu/soc15.c | 4 +++- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 2 ++ drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 3 ++- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 7 +++++-- 10 files changed, 24 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index c50202215f6b..28febf33fb1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -892,7 +892,9 @@ DEFINE_DEBUGFS_ATTRIBUTE(aca_debug_mode_fops, NULL, amdgpu_aca_smu_debug_mode_se void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root) { #if defined(CONFIG_DEBUG_FS) - if (!root || adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6)) + if (!root || + (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6) && + adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 14))) return; debugfs_create_file("aca_debug_mode", 0200, root, adev, &aca_debug_mode_fops); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index f4fefda7e7d8..572c6ac27ccc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1910,6 +1910,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(13, 0, 8): case IP_VERSION(13, 0, 10): case IP_VERSION(13, 0, 11): + case IP_VERSION(13, 0, 14): amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); break; case IP_VERSION(14, 0, 0): diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c index a08c148b13f9..ceb5163480f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -100,6 +100,7 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr) *fru_addr = FRU_EEPROM_MADDR_6; return true; case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): if (fru_addr) *fru_addr = FRU_EEPROM_MADDR_8; return true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index a111751b9781..7b7040ec61bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -609,7 +609,9 @@ DEFINE_DEBUGFS_ATTRIBUTE(mca_debug_mode_fops, NULL, amdgpu_mca_smu_debug_mode_se void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root) { #if defined(CONFIG_DEBUG_FS) - if (!root || amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 6)) + if (!root || + (amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 6) && + amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 14))) return; debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 06a62a8a992e..9b789dcc2bd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -161,6 +161,7 @@ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev) case IP_VERSION(13, 0, 10): return true; case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): return (adev->gmc.is_app_apu) ? false : true; default: return false; @@ -222,6 +223,7 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev, return true; case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 10): + case IP_VERSION(13, 0, 14): control->i2c_address = EEPROM_I2C_MADDR_4; return true; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index ea4873f6ccd1..bfdde772b7ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -33,6 +33,7 @@ int amdgpu_reset_init(struct amdgpu_device *adev) switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(13, 0, 2): case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): ret = aldebaran_reset_init(adev); break; case IP_VERSION(11, 0, 7): @@ -55,6 +56,7 @@ int amdgpu_reset_fini(struct amdgpu_device *adev) switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(13, 0, 2): case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): ret = aldebaran_reset_fini(adev); break; case IP_VERSION(11, 0, 7): diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 55ee5ac82879..5169795df38c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -326,7 +326,8 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev) if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 0) || amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 1) || - amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6)) + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6) || + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14)) return 10000; if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 0) || amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 1)) @@ -554,6 +555,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev) return AMD_RESET_METHOD_MODE2; break; case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): /* Use gpu_recovery param to target a reset 
method.
	 * Enable triggering of GPU reset only if specified
	 * by module parameter.
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 7789b313285c..bdf9f8031187 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -705,6 +705,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
 		smu_v13_0_0_set_ppt_funcs(smu);
 		break;
 	case IP_VERSION(13, 0, 6):
+	case IP_VERSION(13, 0, 14):
 		smu_v13_0_6_set_ppt_funcs(smu);
 		/* Enable pp_od_clk_voltage node */
 		smu->od_enabled = true;
@@ -2716,6 +2717,7 @@ int smu_get_power_limit(void *handle,
 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
 	case IP_VERSION(13, 0, 2):
 	case IP_VERSION(13, 0, 6):
+	case IP_VERSION(13, 0, 14):
 	case IP_VERSION(11, 0, 7):
 	case IP_VERSION(11, 0, 11):
 	case IP_VERSION(11, 0, 12):
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index ed5a7a83c9e2..0fd25b72a40c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -271,7 +271,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
 	smu_minor = (smu_version >> 8) & 0xff;
 	smu_debug = (smu_version >> 0) & 0xff;
 	if (smu->is_apu ||
-	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6))
+	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14))
 		adev->pm.fw_version = smu_version;
 	/* only for dGPU w/ SMU13*/
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index a923e44451d6..051092f1b1b4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -68,6 +68,7 @@
 #undef pr_debug
 MODULE_FIRMWARE("amdgpu/smu_13_0_6.bin");
+MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
@@ -462,8 +463,10 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
 	memset(&pm_metrics->common_header, 0, sizeof(pm_metrics->common_header));
-	pm_metrics->common_header.mp1_ip_discovery_version =
-		IP_VERSION(13, 0, 6);
+	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6))
+		pm_metrics->common_header.mp1_ip_discovery_version = IP_VERSION(13, 0, 6);
+	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14))
+		pm_metrics->common_header.mp1_ip_discovery_version = IP_VERSION(13, 0, 14);
 	pm_metrics->common_header.pmfw_version = pmfw_version;
 	pm_metrics->common_header.pmmetrics_version = table_version;
 	pm_metrics->common_header.structure_size =
-- cgit

From 85a24a3ea09e93c05ff59609fde6c3d825a014c2 Mon Sep 17 00:00:00 2001
From: Yang Wang
Date: Mon, 6 May 2024 10:41:28 +0800
Subject: drm/amdgpu: ignore unsupported ras blocks when dispatching MCA banks

This patch fixes incorrect parsing of error counts. When a UE triggers a
GPU reset, the driver attempts to parse all possible RAS blocks; for RAS
blocks that are not supported by the current ASIC, the driver should
ignore this error.

Signed-off-by: Yang Wang
Reviewed-by: Candice Li
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 7b7040ec61bd..e0fe47c54e75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -415,7 +415,7 @@ static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_r
 		count = 0;
 		ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count);
-		if (ret)
+		if (ret && ret != -EOPNOTSUPP)
 			return ret;
 		if (!count)
-- cgit

From 258ed689bc3163f86204f75df6c23f92b59b3fad Mon Sep 17 00:00:00 2001
From: Yang Wang
Date: Fri, 17 May 2024 07:56:24 +0800
Subject: drm/amdgpu: change bank cache lock type to spinlock

Change the lock type to 'spinlock' to avoid scheduling issues in
interrupt context.

Signed-off-by: Yang Wang
Reviewed-by: Tao Zhou
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 11 +++++------
 drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h | 2 +-
 2 files changed, 6 insertions(+), 7 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index e0fe47c54e75..da40c2d97df8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -233,7 +233,7 @@ int amdgpu_mca_init(struct amdgpu_device *adev)
 	for (i = 0; i < ARRAY_SIZE(mca->mca_caches); i++) {
 		mca_cache = &mca->mca_caches[i];
-		mutex_init(&mca_cache->lock);
+		spin_lock_init(&mca_cache->lock);
 		amdgpu_mca_bank_set_init(&mca_cache->mca_set);
 	}
@@ -251,7 +251,6 @@ void amdgpu_mca_fini(struct amdgpu_device *adev)
 	for (i = 0; i < ARRAY_SIZE(mca->mca_caches); i++) {
 		mca_cache = &mca->mca_caches[i];
 		amdgpu_mca_bank_set_release(&mca_cache->mca_set);
-		mutex_destroy(&mca_cache->lock);
 	}
 }
@@ -456,9 +455,9 @@ static int amdgpu_mca_add_mca_set_to_cache(struct amdgpu_device *adev, enum amdg
 	struct mca_bank_cache *mca_cache = &adev->mca.mca_caches[type];
 	int ret;
-	mutex_lock(&mca_cache->lock);
+	spin_lock(&mca_cache->lock);
 	ret = amdgpu_mca_bank_set_merge(&mca_cache->mca_set, new);
-	mutex_unlock(&mca_cache->lock);
+	spin_unlock(&mca_cache->lock);
 	return ret;
 }
@@ -488,10 +487,10 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo
 	}
 	/* dispatch mca set again if mca cache has valid data */
-	mutex_lock(&mca_cache->lock);
+	spin_lock(&mca_cache->lock);
 	if (mca_cache->mca_set.nr_entries)
 		ret = amdgpu_mca_dispatch_mca_set(adev, blk, type, &mca_cache->mca_set, err_data);
-	mutex_unlock(&mca_cache->lock);
+	spin_unlock(&mca_cache->lock);
 out_mca_release:
 	amdgpu_mca_bank_set_release(&mca_set);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index e80323ff90c1..c3c184c88dad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -84,7 +84,7 @@ struct mca_bank_set {
 struct mca_bank_cache {
 	struct mca_bank_set mca_set;
-	struct mutex lock;
+	spinlock_t lock;
 };
 struct amdgpu_mca {
-- cgit
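
For context on the last change above: a mutex may sleep, so it cannot be taken on paths that can run in interrupt context (the RAS ISR path that logs MCA errors), while a spinlock never sleeps and is safe there. Below is a minimal, self-contained sketch of that locking pattern; the names are hypothetical and it is not the driver's actual code, which uses the mca_bank_cache structures shown in the patches.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical cache shared between process context and an interrupt path. */
struct bank_cache {
	struct list_head banks;
	spinlock_t lock;		/* spinlock: usable where sleeping is forbidden */
};

static void bank_cache_init(struct bank_cache *cache)
{
	INIT_LIST_HEAD(&cache->banks);
	spin_lock_init(&cache->lock);
}

/* Safe from any context, including interrupts; mutex_lock() could sleep here. */
static void bank_cache_add(struct bank_cache *cache, struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(entry, &cache->banks);
	spin_unlock_irqrestore(&cache->lock, flags);
}

The key distinction is that mutex_lock() may schedule while spin locks never do, which is why the bank cache lock was converted.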