Commit 52791eee authored by Christian König
Browse files

dma-buf: rename reservation_object to dma_resv



Be more consistent with the naming of the other DMA-buf objects.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/323401/
parent 5d344f58
# SPDX-License-Identifier: GPL-2.0-only
obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
reservation.o seqno-fence.o
dma-resv.o seqno-fence.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
obj-$(CONFIG_UDMABUF) += udmabuf.o
......@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
......@@ -104,8 +104,8 @@ static int dma_buf_release(struct inode *inode, struct file *file)
list_del(&dmabuf->list_node);
mutex_unlock(&db_list.lock);
if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
reservation_object_fini(dmabuf->resv);
if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
dma_resv_fini(dmabuf->resv);
module_put(dmabuf->owner);
kfree(dmabuf);
......@@ -165,7 +165,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
* To support cross-device and cross-driver synchronization of buffer access
* implicit fences (represented internally in the kernel with &struct fence) can
* be attached to a &dma_buf. The glue for that and a few related things are
* provided in the &reservation_object structure.
* provided in the &dma_resv structure.
*
* Userspace can query the state of these implicitly tracked fences using poll()
* and related system calls:
......@@ -195,8 +195,8 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
struct dma_buf *dmabuf;
struct reservation_object *resv;
struct reservation_object_list *fobj;
struct dma_resv *resv;
struct dma_resv_list *fobj;
struct dma_fence *fence_excl;
__poll_t events;
unsigned shared_count;
......@@ -214,7 +214,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
return 0;
rcu_read_lock();
reservation_object_fences(resv, &fence_excl, &fobj, &shared_count);
dma_resv_fences(resv, &fence_excl, &fobj, &shared_count);
if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
__poll_t pevents = EPOLLIN;
......@@ -493,13 +493,13 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
struct dma_buf *dmabuf;
struct reservation_object *resv = exp_info->resv;
struct dma_resv *resv = exp_info->resv;
struct file *file;
size_t alloc_size = sizeof(struct dma_buf);
int ret;
if (!exp_info->resv)
alloc_size += sizeof(struct reservation_object);
alloc_size += sizeof(struct dma_resv);
else
/* prevent &dma_buf[1] == dma_buf->resv */
alloc_size += 1;
......@@ -531,8 +531,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
if (!resv) {
resv = (struct reservation_object *)&dmabuf[1];
reservation_object_init(resv);
resv = (struct dma_resv *)&dmabuf[1];
dma_resv_init(resv);
}
dmabuf->resv = resv;
......@@ -896,11 +896,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
{
bool write = (direction == DMA_BIDIRECTIONAL ||
direction == DMA_TO_DEVICE);
struct reservation_object *resv = dmabuf->resv;
struct dma_resv *resv = dmabuf->resv;
long ret;
/* Wait on any implicit rendering fences */
ret = reservation_object_wait_timeout_rcu(resv, write, true,
ret = dma_resv_wait_timeout_rcu(resv, write, true,
MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;
......@@ -1141,8 +1141,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
int ret;
struct dma_buf *buf_obj;
struct dma_buf_attachment *attach_obj;
struct reservation_object *robj;
struct reservation_object_list *fobj;
struct dma_resv *robj;
struct dma_resv_list *fobj;
struct dma_fence *fence;
int count = 0, attach_count, shared_count, i;
size_t size = 0;
......@@ -1175,7 +1175,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
robj = buf_obj->resv;
rcu_read_lock();
reservation_object_fences(robj, &fence, &fobj, &shared_count);
dma_resv_fences(robj, &fence, &fobj, &shared_count);
rcu_read_unlock();
if (fence)
......
......@@ -60,7 +60,7 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
*
* - Then there's also implicit fencing, where the synchronization points are
* implicitly passed around as part of shared &dma_buf instances. Such
* implicit fences are stored in &struct reservation_object through the
* implicit fences are stored in &struct dma_resv through the
* &dma_buf.resv pointer.
*/
......
......@@ -32,7 +32,7 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include <linux/reservation.h>
#include <linux/dma-resv.h>
#include <linux/export.h>
/**
......@@ -50,16 +50,15 @@ DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
/**
* reservation_object_list_alloc - allocate fence list
* dma_resv_list_alloc - allocate fence list
* @shared_max: number of fences we need space for
*
* Allocate a new reservation_object_list and make sure to correctly initialize
* Allocate a new dma_resv_list and make sure to correctly initialize
* shared_max.
*/
static struct reservation_object_list *
reservation_object_list_alloc(unsigned int shared_max)
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
struct reservation_object_list *list;
struct dma_resv_list *list;
list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
if (!list)
......@@ -72,12 +71,12 @@ reservation_object_list_alloc(unsigned int shared_max)
}
/**
* reservation_object_list_free - free fence list
* dma_resv_list_free - free fence list
* @list: list to free
*
* Free a reservation_object_list and make sure to drop all references.
* Free a dma_resv_list and make sure to drop all references.
*/
static void reservation_object_list_free(struct reservation_object_list *list)
static void dma_resv_list_free(struct dma_resv_list *list)
{
unsigned int i;
......@@ -91,24 +90,24 @@ static void reservation_object_list_free(struct reservation_object_list *list)
}
/**
* reservation_object_init - initialize a reservation object
* dma_resv_init - initialize a reservation object
* @obj: the reservation object
*/
void reservation_object_init(struct reservation_object *obj)
void dma_resv_init(struct dma_resv *obj)
{
ww_mutex_init(&obj->lock, &reservation_ww_class);
RCU_INIT_POINTER(obj->fence, NULL);
RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(reservation_object_init);
EXPORT_SYMBOL(dma_resv_init);
/**
* reservation_object_fini - destroys a reservation object
* dma_resv_fini - destroys a reservation object
* @obj: the reservation object
*/
void reservation_object_fini(struct reservation_object *obj)
void dma_resv_fini(struct dma_resv *obj)
{
struct reservation_object_list *fobj;
struct dma_resv_list *fobj;
struct dma_fence *excl;
/*
......@@ -120,32 +119,31 @@ void reservation_object_fini(struct reservation_object *obj)
dma_fence_put(excl);
fobj = rcu_dereference_protected(obj->fence, 1);
reservation_object_list_free(fobj);
dma_resv_list_free(fobj);
ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(reservation_object_fini);
EXPORT_SYMBOL(dma_resv_fini);
/**
* reservation_object_reserve_shared - Reserve space to add shared fences to
* a reservation_object.
* dma_resv_reserve_shared - Reserve space to add shared fences to
* a dma_resv.
* @obj: reservation object
* @num_fences: number of fences we want to add
*
* Should be called before reservation_object_add_shared_fence(). Must
* Should be called before dma_resv_add_shared_fence(). Must
* be called with obj->lock held.
*
* RETURNS
* Zero for success, or -errno
*/
int reservation_object_reserve_shared(struct reservation_object *obj,
unsigned int num_fences)
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
struct reservation_object_list *old, *new;
struct dma_resv_list *old, *new;
unsigned int i, j, k, max;
reservation_object_assert_held(obj);
dma_resv_assert_held(obj);
old = reservation_object_get_list(obj);
old = dma_resv_get_list(obj);
if (old && old->shared_max) {
if ((old->shared_count + num_fences) <= old->shared_max)
......@@ -157,7 +155,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
max = 4;
}
new = reservation_object_list_alloc(max);
new = dma_resv_list_alloc(max);
if (!new)
return -ENOMEM;
......@@ -171,7 +169,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
struct dma_fence *fence;
fence = rcu_dereference_protected(old->shared[i],
reservation_object_held(obj));
dma_resv_held(obj));
if (dma_fence_is_signaled(fence))
RCU_INIT_POINTER(new->shared[--k], fence);
else
......@@ -197,41 +195,40 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
struct dma_fence *fence;
fence = rcu_dereference_protected(new->shared[i],
reservation_object_held(obj));
dma_resv_held(obj));
dma_fence_put(fence);
}
kfree_rcu(old, rcu);
return 0;
}
EXPORT_SYMBOL(reservation_object_reserve_shared);
EXPORT_SYMBOL(dma_resv_reserve_shared);
/**
* reservation_object_add_shared_fence - Add a fence to a shared slot
* dma_resv_add_shared_fence - Add a fence to a shared slot
* @obj: the reservation object
* @fence: the shared fence to add
*
* Add a fence to a shared slot, obj->lock must be held, and
* reservation_object_reserve_shared() has been called.
* dma_resv_reserve_shared() has been called.
*/
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct dma_fence *fence)
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
struct reservation_object_list *fobj;
struct dma_resv_list *fobj;
struct dma_fence *old;
unsigned int i, count;
dma_fence_get(fence);
reservation_object_assert_held(obj);
dma_resv_assert_held(obj);
fobj = reservation_object_get_list(obj);
fobj = dma_resv_get_list(obj);
count = fobj->shared_count;
for (i = 0; i < count; ++i) {
old = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(obj));
dma_resv_held(obj));
if (old->context == fence->context ||
dma_fence_is_signaled(old))
goto replace;
......@@ -247,25 +244,24 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
smp_store_mb(fobj->shared_count, count);
dma_fence_put(old);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
EXPORT_SYMBOL(dma_resv_add_shared_fence);
/**
* reservation_object_add_excl_fence - Add an exclusive fence.
* dma_resv_add_excl_fence - Add an exclusive fence.
* @obj: the reservation object
* @fence: the shared fence to add
*
* Add a fence to the exclusive slot. The obj->lock must be held.
*/
void reservation_object_add_excl_fence(struct reservation_object *obj,
struct dma_fence *fence)
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
struct dma_fence *old_fence = reservation_object_get_excl(obj);
struct reservation_object_list *old;
struct dma_fence *old_fence = dma_resv_get_excl(obj);
struct dma_resv_list *old;
u32 i = 0;
reservation_object_assert_held(obj);
dma_resv_assert_held(obj);
old = reservation_object_get_list(obj);
old = dma_resv_get_list(obj);
if (old)
i = old->shared_count;
......@@ -282,41 +278,40 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
/* inplace update, no shared fences */
while (i--)
dma_fence_put(rcu_dereference_protected(old->shared[i],
reservation_object_held(obj)));
dma_resv_held(obj)));
dma_fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
EXPORT_SYMBOL(dma_resv_add_excl_fence);
/**
* reservation_object_copy_fences - Copy all fences from src to dst.
* dma_resv_copy_fences - Copy all fences from src to dst.
* @dst: the destination reservation object
* @src: the source reservation object
*
* Copy all fences from src to dst. dst->lock must be held.
*/
int reservation_object_copy_fences(struct reservation_object *dst,
struct reservation_object *src)
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
struct reservation_object_list *src_list, *dst_list;
struct dma_resv_list *src_list, *dst_list;
struct dma_fence *old, *new;
unsigned int i, shared_count;
reservation_object_assert_held(dst);
dma_resv_assert_held(dst);
rcu_read_lock();
retry:
reservation_object_fences(src, &new, &src_list, &shared_count);
dma_resv_fences(src, &new, &src_list, &shared_count);
if (shared_count) {
rcu_read_unlock();
dst_list = reservation_object_list_alloc(shared_count);
dst_list = dma_resv_list_alloc(shared_count);
if (!dst_list)
return -ENOMEM;
rcu_read_lock();
reservation_object_fences(src, &new, &src_list, &shared_count);
dma_resv_fences(src, &new, &src_list, &shared_count);
if (!src_list || shared_count > dst_list->shared_max) {
kfree(dst_list);
goto retry;
......@@ -332,7 +327,7 @@ int reservation_object_copy_fences(struct reservation_object *dst,
continue;
if (!dma_fence_get_rcu(fence)) {
reservation_object_list_free(dst_list);
dma_resv_list_free(dst_list);
goto retry;
}
......@@ -348,28 +343,28 @@ int reservation_object_copy_fences(struct reservation_object *dst,
}
if (new && !dma_fence_get_rcu(new)) {
reservation_object_list_free(dst_list);
dma_resv_list_free(dst_list);
goto retry;
}
rcu_read_unlock();
src_list = reservation_object_get_list(dst);
old = reservation_object_get_excl(dst);
src_list = dma_resv_get_list(dst);
old = dma_resv_get_excl(dst);
preempt_disable();
rcu_assign_pointer(dst->fence_excl, new);
rcu_assign_pointer(dst->fence, dst_list);
preempt_enable();
reservation_object_list_free(src_list);
dma_resv_list_free(src_list);
dma_fence_put(old);
return 0;
}
EXPORT_SYMBOL(reservation_object_copy_fences);
EXPORT_SYMBOL(dma_resv_copy_fences);
/**
* reservation_object_get_fences_rcu - Get an object's shared and exclusive
* dma_resv_get_fences_rcu - Get an object's shared and exclusive
* fences without update side lock held
* @obj: the reservation object
* @pfence_excl: the returned exclusive fence (or NULL)
......@@ -381,10 +376,10 @@ EXPORT_SYMBOL(reservation_object_copy_fences);
* exclusive fence is not specified the fence is put into the array of the
* shared fences as well. Returns either zero or -ENOMEM.
*/
int reservation_object_get_fences_rcu(struct reservation_object *obj,
struct dma_fence **pfence_excl,
unsigned *pshared_count,
struct dma_fence ***pshared)
int dma_resv_get_fences_rcu(struct dma_resv *obj,
struct dma_fence **pfence_excl,
unsigned *pshared_count,
struct dma_fence ***pshared)
{
struct dma_fence **shared = NULL;
struct dma_fence *fence_excl;
......@@ -392,14 +387,14 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
int ret = 1;
do {
struct reservation_object_list *fobj;
struct dma_resv_list *fobj;
unsigned int i;
size_t sz = 0;
i = 0;
rcu_read_lock();
reservation_object_fences(obj, &fence_excl, &fobj,
dma_resv_fences(obj, &fence_excl, &fobj,
&shared_count);
if (fence_excl && !dma_fence_get_rcu(fence_excl))
......@@ -465,10 +460,10 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
*pshared = shared;
return ret;
}
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
/**
* reservation_object_wait_timeout_rcu - Wait on reservation's objects
* dma_resv_wait_timeout_rcu - Wait on reservation's objects
* shared and/or exclusive fences.
* @obj: the reservation object
* @wait_all: if true, wait on all fences, else wait on just exclusive fence
......@@ -479,11 +474,11 @@ EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
* greater than zero on success.
*/
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
unsigned long timeout)
long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
bool wait_all, bool intr,
unsigned long timeout)
{
struct reservation_object_list *fobj;
struct dma_resv_list *fobj;
struct dma_fence *fence;
unsigned shared_count;
long ret = timeout ? timeout : 1;
......@@ -493,7 +488,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
rcu_read_lock();
i = -1;
reservation_object_fences(obj, &fence, &fobj, &shared_count);
dma_resv_fences(obj, &fence, &fobj, &shared_count);
if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
if (!dma_fence_get_rcu(fence))
goto unlock_retry;
......@@ -541,11 +536,10 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
rcu_read_unlock();
goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
static inline int
reservation_object_test_signaled_single(struct dma_fence *passed_fence)
static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
struct dma_fence *fence, *lfence = passed_fence;
int ret = 1;
......@@ -562,7 +556,7 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
}
/**
* reservation_object_test_signaled_rcu - Test if a reservation object's
* dma_resv_test_signaled_rcu - Test if a reservation object's
* fences have been signaled.
* @obj: the reservation object
* @test_all: if true, test all fences, otherwise only test the exclusive
......@@ -571,10 +565,9 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
* RETURNS
* true if all fences signaled, else false
*/
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
bool test_all)
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
struct reservation_object_list *fobj;
struct dma_resv_list *fobj;
struct dma_fence *fence_excl;
unsigned shared_count;
int ret;
......@@ -583,14 +576,14 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
retry:
ret = true;
reservation_object_fences(obj, &fence_excl, &fobj, &shared_count);
dma_resv_fences(obj, &fence_excl, &fobj, &shared_count);
if (test_all) {
unsigned i;
for (i = 0; i < shared_count; ++i) {
struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
ret = reservation_object_test_signaled_single(fence);
ret = dma_resv_test_signaled_single(fence);
if (ret < 0)
goto retry;
else if (!ret)
......@@ -599,7 +592,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
}
if (!shared_count && fence_excl) {
ret = reservation_object_test_signaled_single(fence_excl);
ret = dma_resv_test_signaled_single(fence_excl);
if (ret < 0)
goto retry;
}
......@@ -607,4 +600,4 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
......@@ -218,14 +218,14 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
struct amdgpu_amdkfd_fence *ef)
{
struct reservation_object *resv = bo->tbo.base.resv;
struct reservation_object_list *old, *new;
struct dma_resv *resv = bo->tbo.base.resv;
struct dma_resv_list *old, *new;
unsigned int i, j, k;
if (!ef)
return -EINVAL;
old = reservation_object_get_list(resv);
old = dma_resv_get_list(resv);
if (!old)
return 0;
......@@ -241,7 +241,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
struct dma_fence *f;
f = rcu_dereference_protected(old->shared[i],
reservation_object_held(resv));
dma_resv_held(resv));
if (f->context == ef->base.context)
RCU_INIT_POINTER(new->shared[--j], f);
......@@ -258,7 +258,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
struct dma_fence *f;
f = rcu_dereference_protected(new->shared[i],
reservation_object_held(resv));
dma_resv_held(resv));
dma_fence_put(f);
}
kfree_rcu(old, rcu);
......@@ -882,7 +882,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
goto wait_pd_fail;
ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
amdgpu_bo_fence(vm->root.base.bo,
......@@ -2127,7 +2127,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
* Add process eviction fence to bo so they can
* evict each other.
*/
ret = reservation_object_reserve_shared(gws_bo->tbo.base.resv, 1);
ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;