Commit daf0678c authored by Dave Airlie

Merge branch 'drm-next-4.18' of git://people.freedesktop.org/~agd5f/linux into drm-next



Fixes for 4.18. Highlights:
- Fixes for gfxoff on Raven
- Remove an ATPX quirk now that the root cause is fixed
- Runtime PM fixes
- Vega20 register header update
- Wattman fixes
- Misc bug fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180614141428.2909-1-alexander.deucher@amd.com
parents 33ce21d6 5c16f36f
@@ -342,15 +342,12 @@ void get_local_mem_info(struct kgd_dev *kgd,
 		   mem_info->local_mem_size_public,
 		   mem_info->local_mem_size_private);
 
-	if (amdgpu_emu_mode == 1) {
-		mem_info->mem_clk_max = 100;
-		return;
-	}
-
 	if (amdgpu_sriov_vf(adev))
 		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
-	else
+	else if (adev->powerplay.pp_funcs)
 		mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
+	else
+		mem_info->mem_clk_max = 100;
 }
 
 uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
@@ -367,13 +364,12 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
 	/* the sclk is in quantas of 10kHz */
-	if (amdgpu_emu_mode == 1)
-		return 100;
-
 	if (amdgpu_sriov_vf(adev))
 		return adev->clock.default_sclk / 100;
-
-	return amdgpu_dpm_get_sclk(adev, false) / 100;
+	else if (adev->powerplay.pp_funcs)
+		return amdgpu_dpm_get_sclk(adev, false) / 100;
+	else
+		return 100;
 }
 
 void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
...
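The two hunks above share one pattern: instead of calling into the powerplay layer unconditionally, the clock queries now fall back to a safe placeholder when adev->powerplay.pp_funcs is NULL, which also covers the emulation case the removed amdgpu_emu_mode checks handled. A minimal userspace sketch of the same fallback chain; the struct and its fields are invented stand-ins for the amdgpu device state, not the kernel types:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for the amdgpu device; field names mirror the
 * diff but this is not the kernel structure. */
struct dev_state {
	bool is_sriov_vf;      /* running as an SR-IOV virtual function */
	bool has_pp_funcs;     /* powerplay backend registered */
	uint32_t default_sclk; /* in 10 kHz units, as in the kernel */
};

/* Same decision chain as get_max_engine_clock_in_mhz() after the fix:
 * prefer the static default under SR-IOV, ask powerplay when it exists,
 * otherwise report a harmless 100 MHz placeholder instead of crashing. */
static uint32_t max_engine_clock_mhz(const struct dev_state *d)
{
	if (d->is_sriov_vf)
		return d->default_sclk / 100; /* 10 kHz -> MHz */
	else if (d->has_pp_funcs)
		return 1600;                  /* stands in for amdgpu_dpm_get_sclk()/100 */
	else
		return 100;                   /* no DPM available: safe fallback */
}

int main(void)
{
	struct dev_state no_pp = { false, false, 0 };
	printf("fallback clock: %u MHz\n", (unsigned)max_engine_clock_mhz(&no_pp));
	return 0;
}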
@@ -569,7 +569,6 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
 	{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
-	{ 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0, 0, 0, 0, 0 },
 };
...
@@ -522,6 +522,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	struct amdgpu_bo_list_entry *e;
 	struct list_head duplicates;
 	unsigned i, tries = 10;
+	struct amdgpu_bo *gds;
+	struct amdgpu_bo *gws;
+	struct amdgpu_bo *oa;
 	int r;
 
 	INIT_LIST_HEAD(&p->validated);
@@ -652,31 +655,36 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
 				     p->bytes_moved_vis);
 
 	if (p->bo_list) {
-		struct amdgpu_bo *gds = p->bo_list->gds_obj;
-		struct amdgpu_bo *gws = p->bo_list->gws_obj;
-		struct amdgpu_bo *oa = p->bo_list->oa_obj;
 		struct amdgpu_vm *vm = &fpriv->vm;
 		unsigned i;
 
+		gds = p->bo_list->gds_obj;
+		gws = p->bo_list->gws_obj;
+		oa = p->bo_list->oa_obj;
 		for (i = 0; i < p->bo_list->num_entries; i++) {
 			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
 
 			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
 		}
+	} else {
+		gds = p->adev->gds.gds_gfx_bo;
+		gws = p->adev->gds.gws_gfx_bo;
+		oa = p->adev->gds.oa_gfx_bo;
+	}
 
-		if (gds) {
-			p->job->gds_base = amdgpu_bo_gpu_offset(gds);
-			p->job->gds_size = amdgpu_bo_size(gds);
-		}
-		if (gws) {
-			p->job->gws_base = amdgpu_bo_gpu_offset(gws);
-			p->job->gws_size = amdgpu_bo_size(gws);
-		}
-		if (oa) {
-			p->job->oa_base = amdgpu_bo_gpu_offset(oa);
-			p->job->oa_size = amdgpu_bo_size(oa);
-		}
-	}
+	if (gds) {
+		p->job->gds_base = amdgpu_bo_gpu_offset(gds);
+		p->job->gds_size = amdgpu_bo_size(gds);
+	}
+	if (gws) {
+		p->job->gws_base = amdgpu_bo_gpu_offset(gws);
+		p->job->gws_size = amdgpu_bo_size(gws);
+	}
+	if (oa) {
+		p->job->oa_base = amdgpu_bo_gpu_offset(oa);
+		p->job->oa_size = amdgpu_bo_size(oa);
+	}
 
 	if (!r && p->uf_entry.robj) {
...
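The reshuffle above hoists gds/gws/oa out of the if (p->bo_list) scope so the job's GDS/GWS/OA registers are programmed even when userspace submits without a BO list, falling back to the device-global objects. A hedged sketch of that select-then-apply shape, with invented names standing in for the amdgpu structures:

#include <stdio.h>
#include <stddef.h>

struct buf { unsigned long base, size; };

/* Invented stand-ins: a per-submission list that may be absent, and a
 * device-global default used as the fallback. */
struct submission { struct buf *gds; };
static struct buf device_default_gds = { 0x1000, 64 };

static void program_job(const struct submission *s)
{
	struct buf *gds;

	/* Select the source first... */
	if (s->gds)
		gds = s->gds;
	else
		gds = &device_default_gds;

	/* ...then run the common programming exactly once, outside either branch. */
	if (gds)
		printf("GDS base=0x%lx size=%lu\n", gds->base, gds->size);
}

int main(void)
{
	struct submission without_list = { NULL };
	program_job(&without_list); /* still programs the default GDS */
	return 0;
}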
@@ -1730,6 +1730,18 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
 			}
 		}
 	}
+
+	if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) {
+		/* enable gfx powergating */
+		amdgpu_device_ip_set_powergating_state(adev,
+						       AMD_IP_BLOCK_TYPE_GFX,
+						       AMD_PG_STATE_GATE);
+		/* enable gfxoff */
+		amdgpu_device_ip_set_powergating_state(adev,
+						       AMD_IP_BLOCK_TYPE_SMC,
+						       AMD_PG_STATE_GATE);
+	}
+
 	return 0;
 }
...
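This hunk, part of the Raven gfxoff fixes, enables GFX powergating and then requests gfxoff through the SMC block only when the PP_GFXOFF_MASK feature bit is set. A small sketch of the bitmask-gated enable pattern; the feature bits and helper are invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* Invented feature bits; the kernel's PP_GFXOFF_MASK plays the same
 * role in adev->powerplay.pp_feature. */
#define FEAT_GFXOFF (1u << 0)
#define FEAT_ULV    (1u << 1)

static void enable_block_powergating(const char *block)
{
	/* stands in for amdgpu_device_ip_set_powergating_state(..., AMD_PG_STATE_GATE) */
	printf("gating %s\n", block);
}

static void late_init(uint32_t pp_feature)
{
	/* Order matters in the real driver: GFX powergating is enabled
	 * before gfxoff is requested from the SMC. */
	if (pp_feature & FEAT_GFXOFF) {
		enable_block_powergating("GFX");
		enable_block_powergating("SMC (gfxoff)");
	}
}

int main(void)
{
	late_init(FEAT_GFXOFF | FEAT_ULV);
	return 0;
}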
@@ -30,6 +30,7 @@
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
+#include "amdgpu_display.h"
 
 void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 {
@@ -235,6 +236,13 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
 	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
+		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+			/* if gds bo is created from user space, it must be
+			 * passed to bo list
+			 */
+			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
+			return -EINVAL;
+		}
 		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
 			size = size << AMDGPU_GDS_SHIFT;
@@ -749,15 +757,16 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 	struct amdgpu_device *adev = dev->dev_private;
 	struct drm_gem_object *gobj;
 	uint32_t handle;
+	u32 domain;
 	int r;
 
 	args->pitch = amdgpu_align_pitch(adev, args->width,
 					 DIV_ROUND_UP(args->bpp, 8), 0);
 	args->size = (u64)args->pitch * args->height;
 	args->size = ALIGN(args->size, PAGE_SIZE);
-
-	r = amdgpu_gem_object_create(adev, args->size, 0,
-				     AMDGPU_GEM_DOMAIN_VRAM,
+	domain = amdgpu_bo_get_preferred_pin_domain(adev,
+				amdgpu_display_supported_domains(adev));
+	r = amdgpu_gem_object_create(adev, args->size, 0, domain,
 				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
 				     false, NULL, &gobj);
 	if (r)
...
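Around the domain change, amdgpu_mode_dumb_create keeps its existing size math: pitch is width times bytes-per-pixel rounded up to the hardware alignment, and the buffer size is pitch times height rounded up to a page. A worked sketch of that arithmetic; the 256-byte pitch alignment is an assumption for illustration, since the real value comes from amdgpu_align_pitch() per ASIC:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define ALIGN(x, a)        (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint32_t width = 1920, height = 1080, bpp = 32;
	uint32_t cpp = DIV_ROUND_UP(bpp, 8); /* bytes per pixel */

	/* stands in for amdgpu_align_pitch(); 256 bytes is an assumed
	 * alignment, the driver picks it per-ASIC */
	uint32_t pitch = (uint32_t)ALIGN((uint64_t)width * cpp, 256);

	/* size rounded up to a whole page, as in the hunk above */
	uint64_t size = ALIGN((uint64_t)pitch * height, PAGE_SIZE);

	printf("pitch=%u size=%llu\n", pitch, (unsigned long long)size);
	return 0;
}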
@@ -703,11 +703,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
 	 * See function amdgpu_display_supported_domains()
 	 */
-	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
-		domain = AMDGPU_GEM_DOMAIN_VRAM;
-		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
-			domain = AMDGPU_GEM_DOMAIN_GTT;
-	}
+	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
 
 	if (bo->pin_count) {
 		uint32_t mem_type = bo->tbo.mem.mem_type;
@@ -1066,3 +1062,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 
 	return bo->tbo.offset;
 }
+
+uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
+					    uint32_t domain)
+{
+	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
+		domain = AMDGPU_GEM_DOMAIN_VRAM;
+		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
+			domain = AMDGPU_GEM_DOMAIN_GTT;
+	}
+	return domain;
+}
...
@@ -289,7 +289,8 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
 			struct reservation_object *resv,
 			struct dma_fence **fence,
 			bool direct);
+uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
+					    uint32_t domain);
 
 /*
  * sub allocation
...
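The new helper centralizes the rule that a (VRAM|GTT) pin request collapses to VRAM, unless the board's VRAM carve-out is at or below the scatter/gather threshold, in which case GTT wins so small-VRAM APUs can scan out of system memory. A standalone sketch of the same decision; the domain bit values match the amdgpu UAPI but the threshold is an assumed value, since AMDGPU_SG_THRESHOLD is defined in the kernel:

#include <stdio.h>
#include <stdint.h>

#define DOMAIN_GTT  (1u << 1)
#define DOMAIN_VRAM (1u << 2)
/* assumed value for illustration; the kernel defines AMDGPU_SG_THRESHOLD */
#define SG_THRESHOLD (256ull * 1024 * 1024)

static uint32_t preferred_pin_domain(uint64_t real_vram_size, uint32_t domain)
{
	/* only APU display buffers come in as VRAM|GTT */
	if (domain == (DOMAIN_VRAM | DOMAIN_GTT)) {
		domain = DOMAIN_VRAM;
		if (real_vram_size <= SG_THRESHOLD)
			domain = DOMAIN_GTT; /* small carve-out: scan out of GTT */
	}
	return domain;
}

int main(void)
{
	printf("big VRAM:   %u\n", preferred_pin_domain(4ull << 30, DOMAIN_VRAM | DOMAIN_GTT));
	printf("small VRAM: %u\n", preferred_pin_domain(64ull << 20, DOMAIN_VRAM | DOMAIN_GTT));
	return 0;
}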
@@ -49,8 +49,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
 
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring;
-	struct drm_sched_rq *rq;
 	unsigned long bo_size;
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
@@ -84,6 +82,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 	}
 
 	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
 	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
 	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
@@ -102,24 +101,6 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		return r;
 	}
 
-	ring = &adev->vcn.ring_dec;
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
-				  rq, NULL);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up VCN dec run queue.\n");
-		return r;
-	}
-
-	ring = &adev->vcn.ring_enc[0];
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
-				  rq, NULL);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up VCN enc run queue.\n");
-		return r;
-	}
-
 	return 0;
 }
@@ -129,10 +110,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 
 	kfree(adev->vcn.saved_bo);
 
-	drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
-
-	drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
-
 	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
 			      &adev->vcn.gpu_addr,
 			      (void **)&adev->vcn.cpu_addr);
@@ -278,7 +255,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 }
 
 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
-				   struct amdgpu_bo *bo, bool direct,
+				   struct amdgpu_bo *bo,
 				   struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
@@ -306,19 +283,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 	}
 	ib->length_dw = 16;
 
-	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-		job->fence = dma_fence_get(f);
-		if (r)
-			goto err_free;
+	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+	job->fence = dma_fence_get(f);
+	if (r)
+		goto err_free;
 
-		amdgpu_job_free(job);
-	} else {
-		r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
-				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-		if (r)
-			goto err_free;
-	}
+	amdgpu_job_free(job);
 
 	amdgpu_bo_fence(bo, f, false);
 	amdgpu_bo_unreserve(bo);
@@ -370,11 +340,11 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = 14; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
+	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
 }
 
 static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-					  bool direct, struct dma_fence **fence)
+					  struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo = NULL;
@@ -396,7 +366,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = 6; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
+	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
 }
 
 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
@@ -410,7 +380,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		goto error;
 	}
 
-	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
+	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
 		goto error;
...
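Besides dropping the never-used indirect submission path (every caller passed direct = true, so the scheduler entities go away), the VCN init hunk above now records the raw ucode_version word and decodes family, major, and minor from fixed bit positions. A sketch of that unpacking with a made-up sample value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* layout per the diff: [31:24] major, [15:8] minor, [7:0] family */
	uint32_t ucode_version = 0x01001207; /* made-up sample value */

	unsigned family_id     = ucode_version & 0xff;
	unsigned version_major = (ucode_version >> 24) & 0xff;
	unsigned version_minor = (ucode_version >> 8) & 0xff;

	printf("VCN fw %u.%u (family 0x%02x)\n",
	       version_major, version_minor, family_id);
	return 0;
}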
@@ -67,8 +67,6 @@ struct amdgpu_vcn {
 	struct amdgpu_ring ring_dec;
 	struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
 	struct amdgpu_irq_src irq;
-	struct drm_sched_entity entity_dec;
-	struct drm_sched_entity entity_enc;
 	unsigned num_enc_rings;
 };
...
@@ -2123,7 +2123,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 		before->last = saddr - 1;
 		before->offset = tmp->offset;
 		before->flags = tmp->flags;
-		list_add(&before->list, &tmp->list);
+		before->bo_va = tmp->bo_va;
+		list_add(&before->list, &tmp->bo_va->invalids);
 	}
 
 	/* Remember mapping split at the end */
@@ -2133,7 +2134,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 		after->offset = tmp->offset;
 		after->offset += after->start - tmp->start;
 		after->flags = tmp->flags;
-		list_add(&after->list, &tmp->list);
+		after->bo_va = tmp->bo_va;
+		list_add(&after->list, &tmp->bo_va->invalids);
 	}
 
 	list_del(&tmp->list);
...
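Context for the hunk above: amdgpu_vm_bo_clear_mappings splits any mapping that straddles the cleared range [saddr, eaddr] into two survivors; the front piece ends at saddr - 1, and the tail piece starts at eaddr + 1 with its buffer offset advanced by the distance skipped from the original start. The fix links those pieces onto the owning bo_va's invalids list instead of the mapping being deleted. A worked sketch of just the split arithmetic, with an invented mapping struct:

#include <stdio.h>
#include <stdint.h>

struct mapping { uint64_t start, last, offset; };

int main(void)
{
	/* original mapping covering units [100, 199], backed from offset 0 */
	struct mapping tmp = { 100, 199, 0 };
	uint64_t saddr = 140, eaddr = 159; /* range being cleared */

	/* front piece keeps the original offset and ends just before saddr */
	struct mapping before = { tmp.start, saddr - 1, tmp.offset };

	/* tail piece starts just after eaddr; its offset moves forward by
	 * the number of units skipped from the original start */
	struct mapping after = { eaddr + 1, tmp.last, tmp.offset };
	after.offset += after.start - tmp.start;

	printf("before: [%llu,%llu] off=%llu\n",
	       (unsigned long long)before.start, (unsigned long long)before.last,
	       (unsigned long long)before.offset);
	printf("after:  [%llu,%llu] off=%llu\n",
	       (unsigned long long)after.start, (unsigned long long)after.last,
	       (unsigned long long)after.offset);
	return 0;
}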
...@@ -64,7 +64,7 @@ static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev) ...@@ -64,7 +64,7 @@ static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
int fb_channel_number; int fb_channel_number;
fb_channel_number = adev->df_funcs->get_fb_channel_number(adev); fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
if (fb_channel_number > ARRAY_SIZE(df_v3_6_channel_number)) if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
fb_channel_number = 0; fb_channel_number = 0;
return df_v3_6_channel_number[fb_channel_number]; return df_v3_6_channel_number[fb_channel_number];
......
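The one-character fix above closes an off-by-one: for an N-element array, index N is already out of bounds, so the guard must be >=, not >. A minimal demonstration with a userspace ARRAY_SIZE and an invented lookup table:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int channel_number[] = { 1, 2, 0, 4, 0, 8 };

static int lookup(int idx)
{
	/* '>' here would let idx == ARRAY_SIZE() through and read one
	 * past the end; '>=' clamps it to the safe default slot 0 */
	if (idx >= (int)ARRAY_SIZE(channel_number))
		idx = 0;
	return channel_number[idx];
}

int main(void)
{
	printf("%d %d\n", lookup(3), lookup(6)); /* 6 is clamped to slot 0 */
	return 0;
}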
@@ -111,6 +111,7 @@
 
 static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
 {
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
@@ -1837,13 +1838,15 @@ static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
 				int indirect_offset,
 				int list_size,
 				int *unique_indirect_regs,
-				int *unique_indirect_reg_count,
+				int unique_indirect_reg_count,
 				int *indirect_start_offsets,
-				int *indirect_start_offsets_count)
+				int *indirect_start_offsets_count,
+				int max_start_offsets_count)
 {
 	int idx;
 
 	for (; indirect_offset < list_size; indirect_offset++) {
+		WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
 		indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
 		*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
@@ -1851,14 +1854,14 @@ static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
 			indirect_offset += 2;
 
 			/* look for the matching indice */
-			for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
+			for (idx = 0; idx < unique_indirect_reg_count; idx++) {
 				if (unique_indirect_regs[idx] ==
 					register_list_format[indirect_offset] ||
 					!unique_indirect_regs[idx])
 					break;
 			}
 
-			BUG_ON(idx >= *unique_indirect_reg_count);
+			BUG_ON(idx >= unique_indirect_reg_count);
 
 			if (!unique_indirect_regs[idx])
 				unique_indirect_regs[idx] = register_list_format[indirect_offset];
@@ -1893,9 +1896,10 @@ static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
 			adev->gfx.rlc.reg_list_format_direct_reg_list_length,
 			adev->gfx.rlc.reg_list_format_size_bytes >> 2,
 			unique_indirect_regs,
-			&unique_indirect_reg_count,
+			unique_indirect_reg_count,
 			indirect_start_offsets,
-			&indirect_start_offsets_count);
+			&indirect_start_offsets_count,
+			ARRAY_SIZE(indirect_start_offsets));
 
 	/* enable auto inc in case it is disabled */
 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
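The parse_ind_reg_list change above turns the in/out count pointer into a by-value capacity and adds an explicit capacity argument for the offsets array, so the callee can warn before writing past the caller's buffer. A sketch of the pattern with invented names; the kernel uses WARN_ON where this prints to stderr:

#include <stdio.h>

/* Fill 'out' with multiples of step; the caller passes the array's
 * capacity by value so the callee can bound its own writes. */
static void fill_offsets(int *out, int *out_count, int max_count, int step)
{
	for (int v = 0; v < 10 * step; v += step) {
		if (*out_count >= max_count) { /* kernel uses WARN_ON here */
			fprintf(stderr, "capacity %d exceeded\n", max_count);
			return;
		}
		out[(*out_count)++] = v;
	}
}

int main(void)
{
	int offsets[4];
	int count = 0;

	fill_offsets(offsets, &count, (int)(sizeof(offsets) / sizeof(offsets[0])), 3);
	for (int i = 0; i < count; i++)
		printf("%d ", offsets[i]);
	printf("\n");
	return 0;
}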
@@ -3404,11 +3408,6 @@ static int gfx_v9_0_late_init(void *handle)
 	if (r)
 		return r;
 
-	r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,