Commit c16b99d6 authored by Linus Torvalds

Merge tag 'drm-next-2020-02-07' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Just some fixes for this merge window: the tegra changes fix some
  regressions in the merge, nouveau has a few modesetting fixes.

  The amdgpu fixes are a bit bigger, but they contain a couple of weeks of
  fixes, and don't seem to contain anything that isn't really a fix.

  Summary:

  tegra:
   - merge window regression fixes

  nouveau:
   - couple of volta/turing modesetting fixes

  amdgpu:
   - EDC fixes for Arcturus
   - GDDR6 memory training fixes
   - Fix for reading gfx clockgating registers while in GFXOFF state
   - i2c freq fixes
   - Misc display fixes
   - TLB invalidation fix when using semaphores
   - VCN 2.5 instancing fixes
   - Switch raven1 gfxoff to a blacklist
   - Coreboot workaround for KV/KB
   - Root cause dongle fixes for display and revert workaround
   - Enable GPU reset for renoir and navi
   - Navi overclocking fixes
   - Fix up confusing warnings in display clock validation on raven

  amdkfd:
   - SDMA fix

  radeon:
   - Misc LUT fixes"

* tag 'drm-next-2020-02-07' of git://anongit.freedesktop.org/drm/drm: (90 commits)
  gpu: host1x: Set DMA direction only for DMA-mapped buffer objects
  drm/tegra: Reuse IOVA mapping where possible
  drm/tegra: Relax IOMMU usage criteria on old Tegra
  drm/amd/dm/mst: Ignore payload update failures
  drm/amdgpu: update default voltage for boot od table for navi1x
  drm/amdgpu/smu10: fix smu10_get_clock_by_type_with_voltage
  drm/amdgpu/smu10: fix smu10_get_clock_by_type_with_latency
  drm/amdgpu/display: handle multiple numbers of fclks in dcn_calcs.c (v2)
  drm/amdgpu: fetch default VDDC curve voltages (v2)
  drm/amdgpu/smu_v11_0: Correct behavior of restoring default tables (v2)
  drm/amdgpu/navi10: add OD_RANGE for navi overclocking
  drm/amdgpu/navi: fix index for OD MCLK
  drm/amd/display: Fix HW/SW state mismatch
  drm/amd/display: Fix a typo when computing dsc configuration
  drm/amd/powerplay: fix navi10 system intermittent reboot issue V2
  drm/amdkfd: Fix a bug in SDMA RLC queue counting under HWS mode
  drm/amd/display: Only enable cursor on pipes that need it
  drm/nouveau/kms/gv100-: avoid sending a core update until the first modeset
  drm/nouveau/kms/gv100-: move window ownership setup into modesetting path
  drm/nouveau/disp/gv100-: halt NV_PDISP_FE_RM_INTR_STAT_CTRL_DISP_ERROR storms
  ...
parents 8bf5973a 9f880327
@@ -120,6 +120,7 @@ amdgpu-y += \
amdgpu_rlc.o \
gfx_v8_0.o \
gfx_v9_0.o \
gfx_v9_4.o \
gfx_v10_0.o
# add async DMA block
...
@@ -1009,10 +1009,14 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define AMDGPU_REGS_IDX (1<<0)
#define AMDGPU_REGS_NO_KIQ (1<<1)
#define AMDGPU_REGS_KIQ (1<<2)
#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ)
#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ)
#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
...
@@ -527,7 +527,7 @@ static int acp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = state == AMD_PG_STATE_GATE ? true : false;
bool enable = (state == AMD_PG_STATE_GATE);
if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->set_powergating_by_smu)
...
@@ -2129,6 +2129,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
return -ENOMEM;
mutex_init(&(*mem)->lock);
INIT_LIST_HEAD(&(*mem)->bo_va_list);
(*mem)->bo = amdgpu_bo_ref(gws_bo);
(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
(*mem)->process_info = process_info;
...
@@ -42,19 +42,12 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_VCN_JPEG] = 1,
};
static int amdgpu_ctx_total_num_entities(void)
{
unsigned i, num_entities = 0;
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
num_entities += amdgpu_ctx_num_entities[i];
return num_entities;
}
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
enum drm_sched_priority priority)
{
if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
return -EINVAL;
/* NORMAL and below are accessible by everyone */
if (priority <= DRM_SCHED_PRIORITY_NORMAL)
return 0;
@@ -68,64 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
return -EACCES;
}
static int amdgpu_ctx_init(struct amdgpu_device *adev,
enum drm_sched_priority priority,
struct drm_file *filp,
struct amdgpu_ctx *ctx)
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
{
unsigned num_entities = amdgpu_ctx_total_num_entities();
unsigned i, j;
struct amdgpu_device *adev = ctx->adev;
struct amdgpu_ctx_entity *entity;
struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
unsigned num_scheds = 0;
enum drm_sched_priority priority;
int r;
if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
return -EINVAL;
r = amdgpu_ctx_priority_permit(filp, priority);
if (r)
return r;
entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
GFP_KERNEL);
if (!entity)
return -ENOMEM;
memset(ctx, 0, sizeof(*ctx));
ctx->adev = adev;
ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
sizeof(struct dma_fence*), GFP_KERNEL);
if (!ctx->fences)
return -ENOMEM;
ctx->entities[0] = kcalloc(num_entities,
sizeof(struct amdgpu_ctx_entity),
GFP_KERNEL);
if (!ctx->entities[0]) {
r = -ENOMEM;
goto error_free_fences;
}
for (i = 0; i < num_entities; ++i) {
struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
entity->sequence = 1;
entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
}
for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
ctx->entities[i] = ctx->entities[i - 1] +
amdgpu_ctx_num_entities[i - 1];
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
mutex_init(&ctx->lock);
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
ctx->init_priority = priority;
ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
struct drm_gpu_scheduler **scheds;
struct drm_gpu_scheduler *sched;
unsigned num_scheds = 0;
switch (i) {
entity->sequence = 1;
priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
switch (hw_ip) {
case AMDGPU_HW_IP_GFX:
sched = &adev->gfx.gfx_ring[0].sched;
scheds = &sched;
@@ -166,53 +119,90 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
scheds = adev->jpeg.jpeg_sched;
num_scheds = adev->jpeg.num_jpeg_sched;
break;
}
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
r = drm_sched_entity_init(&ctx->entities[i][j].entity,
priority, scheds,
num_scheds, &ctx->guilty);
if (r)
goto error_cleanup_entities;
}
r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
&ctx->guilty);
if (r)
goto error_free_entity;
ctx->entities[hw_ip][ring] = entity;
return 0;
error_cleanup_entities:
for (i = 0; i < num_entities; ++i)
drm_sched_entity_destroy(&ctx->entities[0][i].entity);
kfree(ctx->entities[0]);
error_free_entity:
kfree(entity);
error_free_fences:
kfree(ctx->fences);
ctx->fences = NULL;
return r;
}
static int amdgpu_ctx_init(struct amdgpu_device *adev,
enum drm_sched_priority priority,
struct drm_file *filp,
struct amdgpu_ctx *ctx)
{
int r;
r = amdgpu_ctx_priority_permit(filp, priority);
if (r)
return r;
memset(ctx, 0, sizeof(*ctx));
ctx->adev = adev;
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
mutex_init(&ctx->lock);
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
ctx->init_priority = priority;
ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
return 0;
}
static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
int i;
if (!entity)
return;
for (i = 0; i < amdgpu_sched_jobs; ++i)
dma_fence_put(entity->fences[i]);
kfree(entity);
}
static void amdgpu_ctx_fini(struct kref *ref)
{
struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
unsigned num_entities = amdgpu_ctx_total_num_entities();
struct amdgpu_device *adev = ctx->adev;
unsigned i, j;
if (!adev)
return;
for (i = 0; i < num_entities; ++i)
for (j = 0; j < amdgpu_sched_jobs; ++j)
dma_fence_put(ctx->entities[0][i].fences[j]);
kfree(ctx->fences);
kfree(ctx->entities[0]);
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
amdgpu_ctx_fini_entity(ctx->entities[i][j]);
ctx->entities[i][j] = NULL;
}
}
mutex_destroy(&ctx->lock);
kfree(ctx);
}
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
u32 ring, struct drm_sched_entity **entity)
{
int r;
if (hw_ip >= AMDGPU_HW_IP_NUM) {
DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
return -EINVAL;
@@ -229,7 +219,13 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
return -EINVAL;
}
*entity = &ctx->entities[hw_ip][ring].entity;
if (ctx->entities[hw_ip][ring] == NULL) {
r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
if (r)
return r;
}
*entity = &ctx->entities[hw_ip][ring]->entity;
return 0;
}
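With this refactor, context entities are no longer all allocated up front in amdgpu_ctx_init(); amdgpu_ctx_get_entity() creates each one lazily on first lookup. A hedged illustration of the caller-side contract follows (the call site is hypothetical; the function and constant names are from the patch):

	/* Hypothetical call site: look up the scheduler entity for GFX ring 0.
	 * On the first lookup for a given (hw_ip, ring) pair, the patch's
	 * amdgpu_ctx_get_entity() calls amdgpu_ctx_init_entity() to create it. */
	struct drm_sched_entity *entity;
	int r;

	r = amdgpu_ctx_get_entity(ctx, AMDGPU_HW_IP_GFX, 0 /* instance */,
				  0 /* ring */, &entity);
	if (r)
		return r;
	/* entity now points into ctx->entities[AMDGPU_HW_IP_GFX][0] */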
@@ -269,14 +265,17 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
static void amdgpu_ctx_do_release(struct kref *ref)
{
struct amdgpu_ctx *ctx;
unsigned num_entities;
u32 i;
u32 i, j;
ctx = container_of(ref, struct amdgpu_ctx, refcount);
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
if (!ctx->entities[i][j])
continue;
num_entities = amdgpu_ctx_total_num_entities();
for (i = 0; i < num_entities; i++)
drm_sched_entity_destroy(&ctx->entities[0][i].entity);
drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
}
}
amdgpu_ctx_fini(ref);
}
@@ -506,19 +505,23 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
enum drm_sched_priority priority)
{
unsigned num_entities = amdgpu_ctx_total_num_entities();
enum drm_sched_priority ctx_prio;
unsigned i;
unsigned i, j;
ctx->override_priority = priority;
ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
struct drm_sched_entity *entity;
for (i = 0; i < num_entities; i++) {
struct drm_sched_entity *entity = &ctx->entities[0][i].entity;
if (!ctx->entities[i][j])
continue;
drm_sched_entity_set_priority(entity, ctx_prio);
entity = &ctx->entities[i][j]->entity;
drm_sched_entity_set_priority(entity, ctx_prio);
}
}
}
@@ -554,20 +557,24 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
unsigned num_entities = amdgpu_ctx_total_num_entities();
struct amdgpu_ctx *ctx;
struct idr *idp;
uint32_t id, i;
uint32_t id, i, j;
idp = &mgr->ctx_handles;
mutex_lock(&mgr->lock);
idr_for_each_entry(idp, ctx, id) {
for (i = 0; i < num_entities; i++) {
struct drm_sched_entity *entity;
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
struct drm_sched_entity *entity;
if (!ctx->entities[i][j])
continue;
entity = &ctx->entities[0][i].entity;
timeout = drm_sched_entity_flush(entity, timeout);
entity = &ctx->entities[i][j]->entity;
timeout = drm_sched_entity_flush(entity, timeout);
}
}
}
mutex_unlock(&mgr->lock);
@@ -576,10 +583,9 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
unsigned num_entities = amdgpu_ctx_total_num_entities();
struct amdgpu_ctx *ctx;
struct idr *idp;
uint32_t id, i;
uint32_t id, i, j;
idp = &mgr->ctx_handles;
@@ -589,8 +595,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
continue;
}
for (i = 0; i < num_entities; i++)
drm_sched_entity_fini(&ctx->entities[0][i].entity);
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
struct drm_sched_entity *entity;
if (!ctx->entities[i][j])
continue;
entity = &ctx->entities[i][j]->entity;
drm_sched_entity_fini(entity);
}
}
}
}
...
@@ -29,10 +29,12 @@ struct drm_device;
struct drm_file;
struct amdgpu_fpriv;
#define AMDGPU_MAX_ENTITY_NUM 4
struct amdgpu_ctx_entity {
uint64_t sequence;
struct dma_fence **fences;
struct drm_sched_entity entity;
struct dma_fence *fences[];
};
struct amdgpu_ctx {
@@ -42,8 +44,7 @@ struct amdgpu_ctx {
unsigned reset_counter_query;
uint32_t vram_lost_counter;
spinlock_t ring_lock;
struct dma_fence **fences;
struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM];
struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
bool preamble_presented;
enum drm_sched_priority init_priority;
enum drm_sched_priority override_priority;
...
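The per-context fences pointer array is gone: each amdgpu_ctx_entity now carries its fence slots as a flexible array member, so a single allocation covers the entity and its fences. A minimal standalone sketch of the pattern (hypothetical names, plain C calloc() standing in for the kernel's kcalloc()):

	#include <stddef.h>
	#include <stdlib.h>

	struct dma_fence;                     /* opaque for this sketch */

	struct ctx_entity {
		unsigned long sequence;
		struct dma_fence *fences[];   /* flexible array member */
	};

	/* One allocation sized via offsetof() holds the struct plus num_jobs
	 * fence pointers, mirroring the patch's
	 * kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]), ...). */
	static struct ctx_entity *ctx_entity_alloc(unsigned int num_jobs)
	{
		struct ctx_entity *e;

		e = calloc(1, offsetof(struct ctx_entity, fences[num_jobs]));
		if (e)
			e->sequence = 1;  /* first seqno, as in amdgpu_ctx_init_entity() */
		return e;
	}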
@@ -216,8 +216,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
{
uint32_t ret;
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_virt_kiq_rreg(adev, reg);
if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
return amdgpu_kiq_rreg(adev, reg);
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@ -294,8 +294,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
adev->last_mm_index = v;
}
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_virt_kiq_wreg(adev, reg, v);
if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
return amdgpu_kiq_wreg(adev, reg, v);
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
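The register accessors now honor the new AMDGPU_REGS_KIQ flag in addition to the existing SR-IOV runtime check. The decision both hunks above encode could be factored as below (a hypothetical helper for illustration only, not part of the patch):

	/* Hypothetical helper mirroring the condition in amdgpu_mm_rreg()/wreg():
	 * route the access through the KIQ ring when the caller forces it with
	 * AMDGPU_REGS_KIQ, or when running as an SR-IOV guest at runtime, unless
	 * the caller opted out with AMDGPU_REGS_NO_KIQ. */
	static bool amdgpu_access_via_kiq(struct amdgpu_device *adev,
					  uint32_t acc_flags)
	{
		return (acc_flags & AMDGPU_REGS_KIQ) ||
		       (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
			amdgpu_sriov_runtime(adev));
	}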
@@ -985,7 +985,7 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
struct sysinfo si;
bool is_os_64 = (sizeof(void *) == 8) ? true : false;
bool is_os_64 = (sizeof(void *) == 8);
uint64_t total_memory;
uint64_t dram_size_seven_GB = 0x1B8000000;
uint64_t dram_size_three_GB = 0xB8000000;
@@ -3760,6 +3760,10 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
case CHIP_VEGA12:
case CHIP_RAVEN:
case CHIP_ARCTURUS:
case CHIP_RENOIR:
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
break;
default:
goto disabled;
...
@@ -52,6 +52,9 @@ struct amdgpu_df_funcs {
uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
uint32_t ficadl_val, uint32_t ficadh_val);
uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev,
uint32_t df_inst);
uint32_t (*get_df_inst_id)(struct amdgpu_device *adev);
};
struct amdgpu_df {
...
@@ -296,7 +296,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
spin_lock_init(&kiq->ring_lock);
r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs);
r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs);
if (r)
return r;
@@ -321,7 +321,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs);
amdgpu_ring_fini(ring);
}
@@ -658,3 +658,95 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
signed long r, cnt = 0;
unsigned long flags;
uint32_t seq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_rreg);
spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_rreg(ring, reg);
amdgpu_fence_emit_polling(ring, &seq);
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
/* Don't wait any longer in the GPU-reset case, because that can
 * block the gpu_recover() routine forever: e.g. this read may be
 * triggered from TTM, and ttm_bo_lock_delayed_workqueue() would
 * never return if we kept waiting here, hanging gpu_recover().
 *
 * Likewise, don't wait when called from IRQ context.
 */
if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
goto failed_kiq_read;
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}
if (cnt > MAX_KIQ_REG_TRY)
goto failed_kiq_read;
return adev->wb.wb[kiq->reg_val_offs];
failed_kiq_read:
pr_err("failed to read reg:%x\n", reg);
return ~0;
}
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
signed long r, cnt = 0;
unsigned long flags;
uint32_t seq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_wreg);
spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_wreg(ring, reg, v);
amdgpu_fence_emit_polling(ring, &seq);
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
/* Don't wait any longer in the GPU-reset case, because that can
 * block the gpu_recover() routine forever: e.g. this write may be
 * triggered from TTM, and ttm_bo_lock_delayed_workqueue() would
 * never return if we kept waiting here, hanging gpu_recover().
 *
 * Likewise, don't wait when called from IRQ context.
 */
if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
goto failed_kiq_write;
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}
if (cnt > MAX_KIQ_REG_TRY)
goto failed_kiq_write;
return;
failed_kiq_write:
pr_err("failed to write reg:%x\n", reg);
}
@@ -94,6 +94,7 @@ struct amdgpu_kiq {
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
const struct kiq_pm4_funcs *pmf;
uint32_t reg_val_offs;
};
/*
......@@ -375,4 +376,6 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
#endif
@@ -60,11 +60,6 @@
*/
#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL