Commit 42345d63 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Radeon and amdkfd fixes.

  The radeon ones are mostly for oopses in some test/benchmark
  functions since the fencing changes, plus one regression fix for old
  GPUs.

  There is one cirrus regression fix: the 32bpp support broke
  userspace, so this hides it behind a module option for the few users
  who care.

  I'm off for a few days, so this is probably the final pull I have;
  if I see fixes from Intel I'll forward the pull, as I should have
  email"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/cirrus: Limit modes depending on bpp option
  drm/radeon: fix the crash in test functions
  drm/radeon: fix the crash in benchmark functions
  drm/radeon: properly set vm fragment size for TN/RL
  drm/radeon: don't init gpuvm if accel is disabled (v3)
  drm/radeon: fix PLLs on RS880 and older v2
  drm/amdkfd: Don't create BUG due to incorrect user parameter
  drm/amdkfd: max num of queues can't be 0
  drm/amdkfd: Fix bug in accounting of queues
parents d445d46d 7f551b1e
@@ -822,7 +822,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 	 * Unconditionally decrement this counter, regardless of the queue's
 	 * type.
 	 */
-	dqm->total_queue_count++;
+	dqm->total_queue_count--;
 	pr_debug("Total of %d queues are accountable so far\n",
 			dqm->total_queue_count);
 	mutex_unlock(&dqm->lock);
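A note on the hunk above: the destroy path incremented total_queue_count exactly like the create path, so the counter only ever grew and the limit check against max_num_of_queues_per_device would eventually reject every new queue. A minimal sketch of the intended symmetry (illustrative names, not the actual amdkfd functions):

	#include <linux/mutex.h>

	struct dqm_accounting {
		struct mutex lock;
		unsigned int total_queue_count;	/* queues of every type */
	};

	static void account_queue_created(struct dqm_accounting *acct)
	{
		mutex_lock(&acct->lock);
		acct->total_queue_count++;	/* create: count it... */
		mutex_unlock(&acct->lock);
	}

	static void account_queue_destroyed(struct dqm_accounting *acct)
	{
		mutex_lock(&acct->lock);
		acct->total_queue_count--;	/* ...destroy: uncount it */
		mutex_unlock(&acct->lock);
	}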
@@ -95,10 +95,10 @@ static int __init kfd_module_init(void)
 	}
 
 	/* Verify module parameters */
-	if ((max_num_of_queues_per_device < 0) ||
+	if ((max_num_of_queues_per_device < 1) ||
 		(max_num_of_queues_per_device >
 			KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
-		pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
+		pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
 		return -1;
 	}
@@ -315,7 +315,11 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
 	BUG_ON(!pqm);
 
 	pqn = get_queue_by_qid(pqm, qid);
-	BUG_ON(!pqn);
+	if (!pqn) {
+		pr_debug("amdkfd: No queue %d exists for update operation\n",
+				qid);
+		return -EFAULT;
+	}
 
 	pqn->q->properties.queue_address = p->queue_address;
 	pqn->q->properties.queue_size = p->queue_size;
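The hunk above also illustrates a general rule: BUG_ON() must not be reachable from user-controlled input. The qid arrives from an ioctl, so an invalid value used to take the whole machine down; now it is a debug message and an error return. Schematically (hypothetical helper, same shape as the fix):

	/* Hypothetical sketch: validate a user-supplied handle and fail
	 * gracefully rather than crashing the kernel on bad input. */
	static int update_object(struct manager *mgr, unsigned int id)
	{
		struct object *obj = lookup_by_id(mgr, id);	/* may be NULL */

		if (!obj)
			return -EFAULT;	/* user error, not a kernel bug */
		/* ... apply the update ... */
		return 0;
	}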
@@ -16,9 +16,12 @@
 #include "cirrus_drv.h"
 
 int cirrus_modeset = -1;
+int cirrus_bpp = 24;
 
 MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
 module_param_named(modeset, cirrus_modeset, int, 0400);
+MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)");
+module_param_named(bpp, cirrus_bpp, int, 0400);
 
 /*
  * This is the generic driver code. This binds the driver to the drm core,
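Taken together, the cirrus hunks above add a bpp module parameter (default 24; the 0400 permissions make it read-only once the module is loaded). A user who still wants 32bpp opts back in at boot or module load time, for example:

	cirrus.bpp=32            on the kernel command line
	options cirrus bpp=32    in /etc/modprobe.d/cirrus.conf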
@@ -262,4 +262,7 @@ static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
 int cirrus_bo_push_sysram(struct cirrus_bo *bo);
 int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
 
+extern int cirrus_bpp;
+
 #endif	/* __CIRRUS_DRV_H__ */
@@ -320,6 +320,8 @@ bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
 	const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
 	const int max_size = cdev->mc.vram_size;
 
+	if (bpp > cirrus_bpp)
+		return false;
 	if (bpp > 32)
 		return false;
@@ -501,8 +501,13 @@ static int cirrus_vga_get_modes(struct drm_connector *connector)
 	int count;
 
 	/* Just add a static list of modes */
-	count = drm_add_modes_noedid(connector, 1280, 1024);
-	drm_set_preferred_mode(connector, 1024, 768);
+	if (cirrus_bpp <= 24) {
+		count = drm_add_modes_noedid(connector, 1280, 1024);
+		drm_set_preferred_mode(connector, 1024, 768);
+	} else {
+		count = drm_add_modes_noedid(connector, 800, 600);
+		drm_set_preferred_mode(connector, 800, 600);
+	}
 	return count;
 }
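The 24bpp/32bpp mode split is driven by the VRAM budget. A rough worked example, assuming the 4 MiB of VRAM a typical emulated Cirrus adapter exposes (an assumption; exact sizes vary by hardware):

	1280 x 1024 x 3 bytes (24bpp) ~ 3.75 MiB   fits
	1280 x 1024 x 4 bytes (32bpp) ~ 5.00 MiB   does not fit
	 800 x  600 x 4 bytes (32bpp) ~ 1.83 MiB   fits comfortably

So with bpp above 24 the driver only advertises modes it can actually back with memory.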
@@ -34,7 +34,8 @@
 
 static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
 				    uint64_t saddr, uint64_t daddr,
-				    int flag, int n)
+				    int flag, int n,
+				    struct reservation_object *resv)
 {
 	unsigned long start_jiffies;
 	unsigned long end_jiffies;
@@ -47,12 +48,12 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
 	case RADEON_BENCHMARK_COPY_DMA:
 		fence = radeon_copy_dma(rdev, saddr, daddr,
 					size / RADEON_GPU_PAGE_SIZE,
-					NULL);
+					resv);
 		break;
 	case RADEON_BENCHMARK_COPY_BLIT:
 		fence = radeon_copy_blit(rdev, saddr, daddr,
 					 size / RADEON_GPU_PAGE_SIZE,
-					 NULL);
+					 resv);
 		break;
 	default:
 		DRM_ERROR("Unknown copy method\n");
@@ -120,7 +121,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 
 	if (rdev->asic->copy.dma) {
 		time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
-						RADEON_BENCHMARK_COPY_DMA, n);
+						RADEON_BENCHMARK_COPY_DMA, n,
+						dobj->tbo.resv);
 		if (time < 0)
 			goto out_cleanup;
 		if (time > 0)
@@ -130,7 +132,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 
 	if (rdev->asic->copy.blit) {
 		time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
-						RADEON_BENCHMARK_COPY_BLIT, n);
+						RADEON_BENCHMARK_COPY_BLIT, n,
+						dobj->tbo.resv);
 		if (time < 0)
 			goto out_cleanup;
 		if (time > 0)
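For context on the benchmark hunks above (and the matching radeon_test.c hunks below): since the fence rework the copy entry points take a reservation object so the copy can be ordered against existing fences, and my reading of the oops is that they use it unconditionally, so the NULL the benchmark still passed crashed. The fixed call shape, sketched with the same arguments the patch uses (schematic, not compilable on its own):

	/* Pass the destination BO's reservation object instead of NULL so
	 * radeon_copy_dma() has real fences to synchronize against
	 * (assumes the post-rework radeon_copy_dma() signature). */
	fence = radeon_copy_dma(rdev, saddr, daddr,
				size / RADEON_GPU_PAGE_SIZE,
				dobj->tbo.resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);
	r = radeon_fence_wait(fence, false);	/* wait for the copy to land */
	radeon_fence_unref(&fence);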
@@ -960,6 +960,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
 	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
 	    pll->flags & RADEON_PLL_USE_REF_DIV)
 		ref_div_max = pll->reference_div;
+	else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
+		/* fix for problems on RS880 */
+		ref_div_max = min(pll->max_ref_div, 7u);
 	else
 		ref_div_max = pll->max_ref_div;
@@ -146,7 +146,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 	struct radeon_bo_va *bo_va;
 	int r;
 
-	if (rdev->family < CHIP_CAYMAN) {
+	if ((rdev->family < CHIP_CAYMAN) ||
+	    (!rdev->accel_working)) {
 		return 0;
 	}
@@ -176,7 +177,8 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
 	struct radeon_bo_va *bo_va;
 	int r;
 
-	if (rdev->family < CHIP_CAYMAN) {
+	if ((rdev->family < CHIP_CAYMAN) ||
+	    (!rdev->accel_working)) {
 		return;
 	}
@@ -605,14 +605,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 			return -ENOMEM;
 		}
 
-		vm = &fpriv->vm;
-		r = radeon_vm_init(rdev, vm);
-		if (r) {
-			kfree(fpriv);
-			return r;
-		}
-
 		if (rdev->accel_working) {
+			vm = &fpriv->vm;
+			r = radeon_vm_init(rdev, vm);
+			if (r) {
+				kfree(fpriv);
+				return r;
+			}
+
 			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 			if (r) {
 				radeon_vm_fini(rdev, vm);
radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
}
radeon_vm_fini(rdev, vm);
}
radeon_vm_fini(rdev, vm);
kfree(fpriv);
file_priv->driver_priv = NULL;
}
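Note how the last two kms hunks pair up: radeon_vm_init() now only runs when rdev->accel_working, so radeon_vm_fini() has to move inside the same conditional in the close path; otherwise teardown would operate on a VM that was never initialized.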
@@ -119,11 +119,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
 		if (ring == R600_RING_TYPE_DMA_INDEX)
 			fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
 						size / RADEON_GPU_PAGE_SIZE,
-						NULL);
+						vram_obj->tbo.resv);
 		else
 			fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
 						 size / RADEON_GPU_PAGE_SIZE,
-						 NULL);
+						 vram_obj->tbo.resv);
 		if (IS_ERR(fence)) {
 			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
 			r = PTR_ERR(fence);
@@ -170,11 +170,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
 		if (ring == R600_RING_TYPE_DMA_INDEX)
 			fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
 						size / RADEON_GPU_PAGE_SIZE,
-						NULL);
+						vram_obj->tbo.resv);
 		else
 			fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
 						 size / RADEON_GPU_PAGE_SIZE,
-						 NULL);
+						 vram_obj->tbo.resv);
 		if (IS_ERR(fence)) {
 			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
 			r = PTR_ERR(fence);
@@ -743,9 +743,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
 	 */
 
 	/* NI is optimized for 256KB fragments, SI and newer for 64KB */
-	uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
+	uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
+			       (rdev->family == CHIP_ARUBA)) ?
 			R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
-	uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;
+	uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
+			       (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;
 
 	uint64_t frag_start = ALIGN(pe_start, frag_align);
 	uint64_t frag_end = pe_end & ~(frag_align - 1);