Merge tag 'amd-drm-fixes-6.19-2026-01-22' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.19-2026-01-22:

amdgpu:
- GC 12 fix
- Misc error path fixes
- DC analog fix
- SMU 6 fixes
- TLB flush fix
- DC idle optimization fix

amdkfd:
- GC 11 cooperative launch fix

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patch.msgid.link/20260122204308.946339-1-alexander.deucher@amd.com
Dave Airlie committed 2026-01-23 08:11:56 +10:00
9 changed files with 36 additions and 48 deletions


@@ -763,7 +763,7 @@ void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
 }
 
 static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
-                                                   u64 start_wptr, u32 end_wptr)
+                                                   u64 start_wptr, u64 end_wptr)
 {
         unsigned int first_idx = start_wptr & ring->buf_mask;
         unsigned int last_idx = end_wptr & ring->buf_mask;


@@ -733,8 +733,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
         if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
-                if (!adev->gmc.gmc_funcs->flush_gpu_tlb_pasid)
-                        return 0;
+                if (!adev->gmc.gmc_funcs->flush_gpu_tlb_pasid) {
+                        r = 0;
+                        goto error_unlock_reset;
+                }
+
                 if (adev->gmc.flush_tlb_needs_extra_type_2)
                         adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,


@@ -302,7 +302,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
                 if (job && job->vmid)
                         amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid);
                 amdgpu_ring_undo(ring);
-                return r;
+                goto free_fence;
         }
         *f = &af->base;
         /* get a ref for the job */


@@ -217,8 +217,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
         if (!entity)
                 return 0;
 
-        return drm_sched_job_init(&(*job)->base, entity, 1, owner,
-                                  drm_client_id);
+        r = drm_sched_job_init(&(*job)->base, entity, 1, owner, drm_client_id);
+        if (!r)
+                return 0;
+
+        kfree((*job)->hw_vm_fence);
 err_fence:
         kfree((*job)->hw_fence);


@@ -278,7 +278,6 @@ static void gfx_v12_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
                                            u32 sh_num, u32 instance, int xcc_id);
 static u32 gfx_v12_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);
-static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
 static void gfx_v12_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
                                      uint32_t val);
 static int gfx_v12_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
@@ -4634,16 +4633,6 @@ static int gfx_v12_0_ring_preempt_ib(struct amdgpu_ring *ring)
         return r;
 }
 
-static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring,
-                                           bool start,
-                                           bool secure)
-{
-        uint32_t v = secure ? FRAME_TMZ : 0;
-
-        amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
-        amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
-}
-
 static void gfx_v12_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
                                      uint32_t reg_val_offs)
 {
@@ -5520,7 +5509,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
         .emit_cntxcntl = gfx_v12_0_ring_emit_cntxcntl,
         .init_cond_exec = gfx_v12_0_ring_emit_init_cond_exec,
         .preempt_ib = gfx_v12_0_ring_preempt_ib,
-        .emit_frame_cntl = gfx_v12_0_ring_emit_frame_cntl,
         .emit_wreg = gfx_v12_0_ring_emit_wreg,
         .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
         .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,


@@ -120,8 +120,7 @@ static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev)
              && dev->kfd->mec2_fw_version < 0x1b6) ||
             (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1)
              && dev->kfd->mec2_fw_version < 0x30) ||
-            (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) &&
-             KFD_GC_VERSION(dev) < IP_VERSION(12, 0, 0)))
+            kfd_dbg_has_cwsr_workaround(dev))
                 return false;
 
         /* Assume debugging and cooperative launch supported otherwise. */


@@ -248,8 +248,6 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
         struct vblank_control_work *vblank_work =
                 container_of(work, struct vblank_control_work, work);
         struct amdgpu_display_manager *dm = vblank_work->dm;
-        struct amdgpu_device *adev = drm_to_adev(dm->ddev);
-        int r;
 
         mutex_lock(&dm->dc_lock);
@@ -279,16 +277,7 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
         if (dm->active_vblank_irq_count == 0) {
                 dc_post_update_surfaces_to_stream(dm->dc);
 
-                r = amdgpu_dpm_pause_power_profile(adev, true);
-                if (r)
-                        dev_warn(adev->dev, "failed to set default power profile mode\n");
                 dc_allow_idle_optimizations(dm->dc, true);
-                r = amdgpu_dpm_pause_power_profile(adev, false);
-                if (r)
-                        dev_warn(adev->dev, "failed to restore the power profile mode\n");
         }
 
         mutex_unlock(&dm->dc_lock);


@@ -915,13 +915,19 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
                 struct amdgpu_dm_connector *amdgpu_dm_connector;
                 const struct dc_link *dc_link;
 
-                use_polling |= connector->polled != DRM_CONNECTOR_POLL_HPD;
-
                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
                         continue;
 
                 amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+
+                /*
+                 * Analog connectors may be hot-plugged unlike other connector
+                 * types that don't support HPD. Only poll analog connectors.
+                 */
+                use_polling |=
+                        amdgpu_dm_connector->dc_link &&
+                        dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id);
 
                 dc_link = amdgpu_dm_connector->dc_link;
 
                 /*


@@ -2273,8 +2273,6 @@ static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
         if (scaling_factor == 0)
                 return -EINVAL;
 
-        memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
-
         ret = si_calculate_adjusted_tdp_limits(adev,
                                                false, /* ??? */
                                                adev->pm.dpm.tdp_adjustment,
@@ -2283,6 +2281,12 @@ static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
         if (ret)
                 return ret;
 
+        if (adev->pdev->device == 0x6611 && adev->pdev->revision == 0x87) {
+                /* Workaround buggy powertune on Radeon 430 and 520. */
+                tdp_limit = 32;
+                near_tdp_limit = 28;
+        }
+
         smc_table->dpm2Params.TDPLimit =
                 cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
         smc_table->dpm2Params.NearTDPLimit =
@@ -2328,16 +2332,8 @@ static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
         if (ni_pi->enable_power_containment) {
                 SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
                 u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
                 int ret;
 
-                memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
-
-                smc_table->dpm2Params.NearTDPLimit =
-                        cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
-                smc_table->dpm2Params.SafePowerLimit =
-                        cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
-
                 ret = amdgpu_si_copy_bytes_to_smc(adev,
                                                   (si_pi->state_table_start +
                                                    offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
@@ -3473,10 +3469,15 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                     (adev->pdev->revision == 0x80) ||
                     (adev->pdev->revision == 0x81) ||
                     (adev->pdev->revision == 0x83) ||
-                    (adev->pdev->revision == 0x87) ||
+                    (adev->pdev->revision == 0x87 &&
+                     adev->pdev->device != 0x6611) ||
                     (adev->pdev->device == 0x6604) ||
                     (adev->pdev->device == 0x6605)) {
                         max_sclk = 75000;
+                } else if (adev->pdev->revision == 0x87 &&
+                           adev->pdev->device == 0x6611) {
+                        /* Radeon 430 and 520 */
+                        max_sclk = 78000;
                 }
         }
@@ -7600,12 +7601,12 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
         case AMDGPU_IRQ_STATE_DISABLE:
                 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
                 cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
-                WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
+                WREG32(mmCG_THERMAL_INT, cg_thermal_int);
                 break;
         case AMDGPU_IRQ_STATE_ENABLE:
                 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
                 cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
-                WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
+                WREG32(mmCG_THERMAL_INT, cg_thermal_int);
                 break;
         default:
                 break;
@@ -7617,12 +7618,12 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
         case AMDGPU_IRQ_STATE_DISABLE:
                 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
                 cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
-                WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
+                WREG32(mmCG_THERMAL_INT, cg_thermal_int);
                 break;
         case AMDGPU_IRQ_STATE_ENABLE:
                 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
                 cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
-                WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
+                WREG32(mmCG_THERMAL_INT, cg_thermal_int);
                 break;
         default:
                 break;