diff -rupN a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
--- a/drivers/dma-buf/dma-buf.c	2019-09-23 21:45:35.047572519 +0200
+++ b/drivers/dma-buf/dma-buf.c	2019-09-23 21:44:38.803735864 +0200
@@ -21,7 +21,7 @@
 #include 
 #include 
 #include 
-#include <linux/dma-resv.h>
+#include <linux/reservation.h>
 #include 
 #include 
 #include 
@@ -104,8 +104,8 @@ static int dma_buf_release(struct inode
 	list_del(&dmabuf->list_node);
 	mutex_unlock(&db_list.lock);

-	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
-		dma_resv_fini(dmabuf->resv);
+	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
+		reservation_object_fini(dmabuf->resv);

 	module_put(dmabuf->owner);
 	kfree(dmabuf);
@@ -165,7 +165,7 @@ static loff_t dma_buf_llseek(struct file
  * To support cross-device and cross-driver synchronization of buffer access
  * implicit fences (represented internally in the kernel with &struct fence) can
  * be attached to a &dma_buf. The glue for that and a few related things are
- * provided in the &dma_resv structure.
+ * provided in the &reservation_object structure.
  *
  * Userspace can query the state of these implicitly tracked fences using poll()
  * and related system calls:
@@ -195,8 +195,8 @@ static void dma_buf_poll_cb(struct dma_f
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
 	struct dma_buf *dmabuf;
-	struct dma_resv *resv;
-	struct dma_resv_list *fobj;
+	struct reservation_object *resv;
+	struct reservation_object_list *fobj;
 	struct dma_fence *fence_excl;
 	__poll_t events;
 	unsigned shared_count, seq;
@@ -506,13 +506,13 @@ err_alloc_file:
 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 {
 	struct dma_buf *dmabuf;
-	struct dma_resv *resv = exp_info->resv;
+	struct reservation_object *resv = exp_info->resv;
 	struct file *file;
 	size_t alloc_size = sizeof(struct dma_buf);
 	int ret;

 	if (!exp_info->resv)
-		alloc_size += sizeof(struct dma_resv);
+		alloc_size += sizeof(struct reservation_object);
 	else
 		/* prevent &dma_buf[1] == dma_buf->resv */
 		alloc_size += 1;
@@ -544,8 +544,8 @@ struct dma_buf *dma_buf_export(const str
 	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

 	if (!resv) {
-		resv = (struct dma_resv *)&dmabuf[1];
-		dma_resv_init(resv);
+		resv = (struct reservation_object *)&dmabuf[1];
+		reservation_object_init(resv);
 	}
 	dmabuf->resv = resv;

@@ -909,11 +909,11 @@ static int __dma_buf_begin_cpu_access(st
 {
 	bool write = (direction == DMA_BIDIRECTIONAL ||
 		      direction == DMA_TO_DEVICE);
-	struct dma_resv *resv = dmabuf->resv;
+	struct reservation_object *resv = dmabuf->resv;
 	long ret;

 	/* Wait on any implicit rendering fences */
-	ret = dma_resv_wait_timeout_rcu(resv, write, true,
+	ret = reservation_object_wait_timeout_rcu(resv, write, true,
 						  MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;
@@ -1154,8 +1154,8 @@ static int dma_buf_debug_show(struct seq
 	int ret;
 	struct dma_buf *buf_obj;
 	struct dma_buf_attachment *attach_obj;
-	struct dma_resv *robj;
-	struct dma_resv_list *fobj;
+	struct reservation_object *robj;
+	struct reservation_object_list *fobj;
 	struct dma_fence *fence;
 	unsigned seq;
 	int count = 0, attach_count, shared_count, i;
diff -rupN a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
--- a/drivers/dma-buf/dma-fence-array.c	2019-09-23 21:45:35.047572519 +0200
+++ b/drivers/dma-buf/dma-fence-array.c	2019-09-23 21:44:38.803735864 +0200
@@ -13,8 +13,6 @@
 #include 
 #include 
-#define PENDING_ERROR 1
-
 static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
 {
 	return "dma_fence_array";
 }
@@ -25,29 +23,10 @@ static const char *dma_fence_array_get_t
 	return "unbound";
 }

-static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
-					      int error)
-{
-	/*
-	 * Propagate the first error reported by any of our fences, but only
-	 * before we ourselves are signaled.
-	 */
-	if (error)
-		cmpxchg(&array->base.error, PENDING_ERROR, error);
-}
-
-static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
-{
-	/* Clear the error flag if not actually set. */
-	cmpxchg(&array->base.error, PENDING_ERROR, 0);
-}
-
 static void irq_dma_fence_array_work(struct irq_work *wrk)
 {
 	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);

-	dma_fence_array_clear_pending_error(array);
-
 	dma_fence_signal(&array->base);
 	dma_fence_put(&array->base);
 }
@@ -59,8 +38,6 @@ static void dma_fence_array_cb_func(stru
 		container_of(cb, struct dma_fence_array_cb, cb);
 	struct dma_fence_array *array = array_cb->array;

-	dma_fence_array_set_pending_error(array, f->error);
-
 	if (atomic_dec_and_test(&array->num_pending))
 		irq_work_queue(&array->work);
 	else
@@ -86,14 +63,9 @@ static bool dma_fence_array_enable_signa
 		dma_fence_get(&array->base);
 		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
 					   dma_fence_array_cb_func)) {
-			int error = array->fences[i]->error;
-
-			dma_fence_array_set_pending_error(array, error);
 			dma_fence_put(&array->base);
-			if (atomic_dec_and_test(&array->num_pending)) {
-				dma_fence_array_clear_pending_error(array);
+			if (atomic_dec_and_test(&array->num_pending))
 				return false;
-			}
 		}
 	}

@@ -170,8 +142,6 @@ struct dma_fence_array *dma_fence_array_
 	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
 	array->fences = fences;

-	array->base.error = PENDING_ERROR;
-
 	return array;
 }
 EXPORT_SYMBOL(dma_fence_array_create);
diff -rupN a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
--- a/drivers/dma-buf/dma-fence.c	2019-09-23 21:45:35.047572519 +0200
+++ b/drivers/dma-buf/dma-fence.c	2019-09-23 21:44:38.803735864 +0200
@@ -60,7 +60,7 @@ static atomic64_t dma_fence_context_coun
  *
  * - Then there's also implicit fencing, where the synchronization points are
  *   implicitly passed around as part of shared &dma_buf instances. Such
- *   implicit fences are stored in &struct dma_resv through the
+ *   implicit fences are stored in &struct reservation_object through the
  *   &dma_buf.resv pointer.
 */
@@ -129,27 +129,31 @@ EXPORT_SYMBOL(dma_fence_context_alloc);
 int dma_fence_signal_locked(struct dma_fence *fence)
 {
 	struct dma_fence_cb *cur, *tmp;
-	struct list_head cb_list;
+	int ret = 0;

 	lockdep_assert_held(fence->lock);

-	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-				      &fence->flags)))
+	if (WARN_ON(!fence))
 		return -EINVAL;

-	/* Stash the cb_list before replacing it with the timestamp */
-	list_replace(&fence->cb_list, &cb_list);
+	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+		ret = -EINVAL;

-	fence->timestamp = ktime_get();
-	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
-	trace_dma_fence_signaled(fence);
+		/*
+		 * we might have raced with the unlocked dma_fence_signal,
+		 * still run through all callbacks
+		 */
+	} else {
+		fence->timestamp = ktime_get();
+		set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+		trace_dma_fence_signaled(fence);
+	}

-	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
-		INIT_LIST_HEAD(&cur->node);
+	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+		list_del_init(&cur->node);
 		cur->func(fence, cur);
 	}
-
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(dma_fence_signal_locked);

@@ -169,16 +173,28 @@ EXPORT_SYMBOL(dma_fence_signal_locked);
 int dma_fence_signal(struct dma_fence *fence)
 {
 	unsigned long flags;
-	int ret;

 	if (!fence)
 		return -EINVAL;

-	spin_lock_irqsave(fence->lock, flags);
-	ret = dma_fence_signal_locked(fence);
-	spin_unlock_irqrestore(fence->lock, flags);
+	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return -EINVAL;
+
+	fence->timestamp = ktime_get();
+	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+	trace_dma_fence_signaled(fence);
+
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
+		struct dma_fence_cb *cur, *tmp;

-	return ret;
+		spin_lock_irqsave(fence->lock, flags);
+		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+			list_del_init(&cur->node);
+			cur->func(fence, cur);
+		}
+		spin_unlock_irqrestore(fence->lock, flags);
+	}
+	return 0;
 }
 EXPORT_SYMBOL(dma_fence_signal);

@@ -232,8 +248,7 @@ void dma_fence_release(struct kref *kref

 	trace_dma_fence_destroy(fence);

-	if (WARN(!list_empty(&fence->cb_list) &&
-		 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
+	if (WARN(!list_empty(&fence->cb_list),
 		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
 		 fence->ops->get_driver_name(fence),
 		 fence->ops->get_timeline_name(fence),
diff -rupN a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
--- a/drivers/dma-buf/dma-fence-chain.c	2019-09-23 21:45:35.047572519 +0200
+++ b/drivers/dma-buf/dma-fence-chain.c	2019-09-23 21:44:38.807735853 +0200
@@ -178,30 +178,8 @@ static bool dma_fence_chain_signaled(str
 static void dma_fence_chain_release(struct dma_fence *fence)
 {
 	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
-	struct dma_fence *prev;
-
-	/* Manually unlink the chain as much as possible to avoid recursion
-	 * and potential stack overflow.
-	 */
-	while ((prev = rcu_dereference_protected(chain->prev, true))) {
-		struct dma_fence_chain *prev_chain;
-
-		if (kref_read(&prev->refcount) > 1)
-			break;
-
-		prev_chain = to_dma_fence_chain(prev);
-		if (!prev_chain)
-			break;
-
-		/* No need for atomic operations since we hold the last
-		 * reference to prev_chain.
- */ - chain->prev = prev_chain->prev; - RCU_INIT_POINTER(prev_chain->prev, NULL); - dma_fence_put(prev); - } - dma_fence_put(prev); + dma_fence_put(rcu_dereference_protected(chain->prev, true)); dma_fence_put(chain->fence); dma_fence_free(fence); } diff -rupN a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig --- a/drivers/dma-buf/Kconfig 2019-09-23 21:45:35.047572519 +0200 +++ b/drivers/dma-buf/Kconfig 2019-09-23 21:44:38.807735853 +0200 @@ -39,9 +39,4 @@ config UDMABUF A driver to let userspace turn memfd regions into dma-bufs. Qemu can use this to create host dmabufs for guest framebuffers. -config DMABUF_SELFTESTS - tristate "Selftests for the dma-buf interfaces" - default n - depends on DMA_SHARED_BUFFER - endmenu diff -rupN a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile --- a/drivers/dma-buf/Makefile 2019-09-23 21:45:35.047572519 +0200 +++ b/drivers/dma-buf/Makefile 2019-09-23 21:44:38.807735853 +0200 @@ -1,12 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ - dma-resv.o seqno-fence.o + reservation.o seqno-fence.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o obj-$(CONFIG_UDMABUF) += udmabuf.o - -dmabuf_selftests-y := \ - selftest.o \ - st-dma-fence.o - -obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o diff -rupN a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c --- a/drivers/dma-buf/reservation.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/dma-buf/reservation.c 2019-09-23 21:44:38.807735853 +0200 @@ -0,0 +1,591 @@ +/* + * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst) + * + * Based on bo.c which bears the following copyright notice, + * but is dual licensed: + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#include +#include + +/** + * DOC: Reservation Object Overview + * + * The reservation object provides a mechanism to manage shared and + * exclusive fences associated with a buffer. A reservation object + * can have attached one exclusive fence (normally associated with + * write operations) or N shared fences (read operations). The RCU + * mechanism is used to protect read access to fences from locked + * write-side updates. 
+ */ + +DEFINE_WD_CLASS(reservation_ww_class); +EXPORT_SYMBOL(reservation_ww_class); + +struct lock_class_key reservation_seqcount_class; +EXPORT_SYMBOL(reservation_seqcount_class); + +const char reservation_seqcount_string[] = "reservation_seqcount"; +EXPORT_SYMBOL(reservation_seqcount_string); + +/** + * reservation_object_reserve_shared - Reserve space to add shared fences to + * a reservation_object. + * @obj: reservation object + * @num_fences: number of fences we want to add + * + * Should be called before reservation_object_add_shared_fence(). Must + * be called with obj->lock held. + * + * RETURNS + * Zero for success, or -errno + */ +int reservation_object_reserve_shared(struct reservation_object *obj, + unsigned int num_fences) +{ + struct reservation_object_list *old, *new; + unsigned int i, j, k, max; + + reservation_object_assert_held(obj); + + old = reservation_object_get_list(obj); + + if (old && old->shared_max) { + if ((old->shared_count + num_fences) <= old->shared_max) + return 0; + else + max = max(old->shared_count + num_fences, + old->shared_max * 2); + } else { + max = 4; + } + + new = kmalloc(offsetof(typeof(*new), shared[max]), GFP_KERNEL); + if (!new) + return -ENOMEM; + + /* + * no need to bump fence refcounts, rcu_read access + * requires the use of kref_get_unless_zero, and the + * references from the old struct are carried over to + * the new. + */ + for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) { + struct dma_fence *fence; + + fence = rcu_dereference_protected(old->shared[i], + reservation_object_held(obj)); + if (dma_fence_is_signaled(fence)) + RCU_INIT_POINTER(new->shared[--k], fence); + else + RCU_INIT_POINTER(new->shared[j++], fence); + } + new->shared_count = j; + new->shared_max = max; + + preempt_disable(); + write_seqcount_begin(&obj->seq); + /* + * RCU_INIT_POINTER can be used here, + * seqcount provides the necessary barriers + */ + RCU_INIT_POINTER(obj->fence, new); + write_seqcount_end(&obj->seq); + preempt_enable(); + + if (!old) + return 0; + + /* Drop the references to the signaled fences */ + for (i = k; i < new->shared_max; ++i) { + struct dma_fence *fence; + + fence = rcu_dereference_protected(new->shared[i], + reservation_object_held(obj)); + dma_fence_put(fence); + } + kfree_rcu(old, rcu); + + return 0; +} +EXPORT_SYMBOL(reservation_object_reserve_shared); + +/** + * reservation_object_add_shared_fence - Add a fence to a shared slot + * @obj: the reservation object + * @fence: the shared fence to add + * + * Add a fence to a shared slot, obj->lock must be held, and + * reservation_object_reserve_shared() has been called. 
+ */ +void reservation_object_add_shared_fence(struct reservation_object *obj, + struct dma_fence *fence) +{ + struct reservation_object_list *fobj; + unsigned int i, count; + + dma_fence_get(fence); + + reservation_object_assert_held(obj); + + fobj = reservation_object_get_list(obj); + count = fobj->shared_count; + + preempt_disable(); + write_seqcount_begin(&obj->seq); + + for (i = 0; i < count; ++i) { + struct dma_fence *old_fence; + + old_fence = rcu_dereference_protected(fobj->shared[i], + reservation_object_held(obj)); + if (old_fence->context == fence->context || + dma_fence_is_signaled(old_fence)) { + dma_fence_put(old_fence); + goto replace; + } + } + + BUG_ON(fobj->shared_count >= fobj->shared_max); + count++; + +replace: + RCU_INIT_POINTER(fobj->shared[i], fence); + /* pointer update must be visible before we extend the shared_count */ + smp_store_mb(fobj->shared_count, count); + + write_seqcount_end(&obj->seq); + preempt_enable(); +} +EXPORT_SYMBOL(reservation_object_add_shared_fence); + +/** + * reservation_object_add_excl_fence - Add an exclusive fence. + * @obj: the reservation object + * @fence: the shared fence to add + * + * Add a fence to the exclusive slot. The obj->lock must be held. + */ +void reservation_object_add_excl_fence(struct reservation_object *obj, + struct dma_fence *fence) +{ + struct dma_fence *old_fence = reservation_object_get_excl(obj); + struct reservation_object_list *old; + u32 i = 0; + + reservation_object_assert_held(obj); + + old = reservation_object_get_list(obj); + if (old) + i = old->shared_count; + + if (fence) + dma_fence_get(fence); + + preempt_disable(); + write_seqcount_begin(&obj->seq); + /* write_seqcount_begin provides the necessary memory barrier */ + RCU_INIT_POINTER(obj->fence_excl, fence); + if (old) + old->shared_count = 0; + write_seqcount_end(&obj->seq); + preempt_enable(); + + /* inplace update, no shared fences */ + while (i--) + dma_fence_put(rcu_dereference_protected(old->shared[i], + reservation_object_held(obj))); + + dma_fence_put(old_fence); +} +EXPORT_SYMBOL(reservation_object_add_excl_fence); + +/** +* reservation_object_copy_fences - Copy all fences from src to dst. +* @dst: the destination reservation object +* @src: the source reservation object +* +* Copy all fences from src to dst. dst-lock must be held. 
+*/ +int reservation_object_copy_fences(struct reservation_object *dst, + struct reservation_object *src) +{ + struct reservation_object_list *src_list, *dst_list; + struct dma_fence *old, *new; + size_t size; + unsigned i; + + reservation_object_assert_held(dst); + + rcu_read_lock(); + src_list = rcu_dereference(src->fence); + +retry: + if (src_list) { + unsigned shared_count = src_list->shared_count; + + size = offsetof(typeof(*src_list), shared[shared_count]); + rcu_read_unlock(); + + dst_list = kmalloc(size, GFP_KERNEL); + if (!dst_list) + return -ENOMEM; + + rcu_read_lock(); + src_list = rcu_dereference(src->fence); + if (!src_list || src_list->shared_count > shared_count) { + kfree(dst_list); + goto retry; + } + + dst_list->shared_count = 0; + dst_list->shared_max = shared_count; + for (i = 0; i < src_list->shared_count; ++i) { + struct dma_fence *fence; + + fence = rcu_dereference(src_list->shared[i]); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &fence->flags)) + continue; + + if (!dma_fence_get_rcu(fence)) { + kfree(dst_list); + src_list = rcu_dereference(src->fence); + goto retry; + } + + if (dma_fence_is_signaled(fence)) { + dma_fence_put(fence); + continue; + } + + rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence); + } + } else { + dst_list = NULL; + } + + new = dma_fence_get_rcu_safe(&src->fence_excl); + rcu_read_unlock(); + + src_list = reservation_object_get_list(dst); + old = reservation_object_get_excl(dst); + + preempt_disable(); + write_seqcount_begin(&dst->seq); + /* write_seqcount_begin provides the necessary memory barrier */ + RCU_INIT_POINTER(dst->fence_excl, new); + RCU_INIT_POINTER(dst->fence, dst_list); + write_seqcount_end(&dst->seq); + preempt_enable(); + + if (src_list) + kfree_rcu(src_list, rcu); + dma_fence_put(old); + + return 0; +} +EXPORT_SYMBOL(reservation_object_copy_fences); + +/** + * reservation_object_get_fences_rcu - Get an object's shared and exclusive + * fences without update side lock held + * @obj: the reservation object + * @pfence_excl: the returned exclusive fence (or NULL) + * @pshared_count: the number of shared fences returned + * @pshared: the array of shared fence ptrs returned (array is krealloc'd to + * the required size, and must be freed by caller) + * + * Retrieve all fences from the reservation object. If the pointer for the + * exclusive fence is not specified the fence is put into the array of the + * shared fences as well. Returns either zero or -ENOMEM. 
+ */ +int reservation_object_get_fences_rcu(struct reservation_object *obj, + struct dma_fence **pfence_excl, + unsigned *pshared_count, + struct dma_fence ***pshared) +{ + struct dma_fence **shared = NULL; + struct dma_fence *fence_excl; + unsigned int shared_count; + int ret = 1; + + do { + struct reservation_object_list *fobj; + unsigned int i, seq; + size_t sz = 0; + + shared_count = i = 0; + + rcu_read_lock(); + seq = read_seqcount_begin(&obj->seq); + + fence_excl = rcu_dereference(obj->fence_excl); + if (fence_excl && !dma_fence_get_rcu(fence_excl)) + goto unlock; + + fobj = rcu_dereference(obj->fence); + if (fobj) + sz += sizeof(*shared) * fobj->shared_max; + + if (!pfence_excl && fence_excl) + sz += sizeof(*shared); + + if (sz) { + struct dma_fence **nshared; + + nshared = krealloc(shared, sz, + GFP_NOWAIT | __GFP_NOWARN); + if (!nshared) { + rcu_read_unlock(); + + dma_fence_put(fence_excl); + fence_excl = NULL; + + nshared = krealloc(shared, sz, GFP_KERNEL); + if (nshared) { + shared = nshared; + continue; + } + + ret = -ENOMEM; + break; + } + shared = nshared; + shared_count = fobj ? fobj->shared_count : 0; + for (i = 0; i < shared_count; ++i) { + shared[i] = rcu_dereference(fobj->shared[i]); + if (!dma_fence_get_rcu(shared[i])) + break; + } + + if (!pfence_excl && fence_excl) { + shared[i] = fence_excl; + fence_excl = NULL; + ++i; + ++shared_count; + } + } + + if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) { + while (i--) + dma_fence_put(shared[i]); + dma_fence_put(fence_excl); + goto unlock; + } + + ret = 0; +unlock: + rcu_read_unlock(); + } while (ret); + + if (!shared_count) { + kfree(shared); + shared = NULL; + } + + *pshared_count = shared_count; + *pshared = shared; + if (pfence_excl) + *pfence_excl = fence_excl; + + return ret; +} +EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); + +/** + * reservation_object_wait_timeout_rcu - Wait on reservation's objects + * shared and/or exclusive fences. + * @obj: the reservation object + * @wait_all: if true, wait on all fences, else wait on just exclusive fence + * @intr: if true, do interruptible wait + * @timeout: timeout value in jiffies or zero to return immediately + * + * RETURNS + * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or + * greater than zer on success. + */ +long reservation_object_wait_timeout_rcu(struct reservation_object *obj, + bool wait_all, bool intr, + unsigned long timeout) +{ + struct dma_fence *fence; + unsigned seq, shared_count; + long ret = timeout ? 
timeout : 1; + int i; + +retry: + shared_count = 0; + seq = read_seqcount_begin(&obj->seq); + rcu_read_lock(); + i = -1; + + fence = rcu_dereference(obj->fence_excl); + if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + if (!dma_fence_get_rcu(fence)) + goto unlock_retry; + + if (dma_fence_is_signaled(fence)) { + dma_fence_put(fence); + fence = NULL; + } + + } else { + fence = NULL; + } + + if (wait_all) { + struct reservation_object_list *fobj = + rcu_dereference(obj->fence); + + if (fobj) + shared_count = fobj->shared_count; + + for (i = 0; !fence && i < shared_count; ++i) { + struct dma_fence *lfence = rcu_dereference(fobj->shared[i]); + + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &lfence->flags)) + continue; + + if (!dma_fence_get_rcu(lfence)) + goto unlock_retry; + + if (dma_fence_is_signaled(lfence)) { + dma_fence_put(lfence); + continue; + } + + fence = lfence; + break; + } + } + + rcu_read_unlock(); + if (fence) { + if (read_seqcount_retry(&obj->seq, seq)) { + dma_fence_put(fence); + goto retry; + } + + ret = dma_fence_wait_timeout(fence, intr, ret); + dma_fence_put(fence); + if (ret > 0 && wait_all && (i + 1 < shared_count)) + goto retry; + } + return ret; + +unlock_retry: + rcu_read_unlock(); + goto retry; +} +EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu); + + +static inline int +reservation_object_test_signaled_single(struct dma_fence *passed_fence) +{ + struct dma_fence *fence, *lfence = passed_fence; + int ret = 1; + + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { + fence = dma_fence_get_rcu(lfence); + if (!fence) + return -1; + + ret = !!dma_fence_is_signaled(fence); + dma_fence_put(fence); + } + return ret; +} + +/** + * reservation_object_test_signaled_rcu - Test if a reservation object's + * fences have been signaled. 
+ * @obj: the reservation object + * @test_all: if true, test all fences, otherwise only test the exclusive + * fence + * + * RETURNS + * true if all fences signaled, else false + */ +bool reservation_object_test_signaled_rcu(struct reservation_object *obj, + bool test_all) +{ + unsigned seq, shared_count; + int ret; + + rcu_read_lock(); +retry: + ret = true; + shared_count = 0; + seq = read_seqcount_begin(&obj->seq); + + if (test_all) { + unsigned i; + + struct reservation_object_list *fobj = + rcu_dereference(obj->fence); + + if (fobj) + shared_count = fobj->shared_count; + + for (i = 0; i < shared_count; ++i) { + struct dma_fence *fence = rcu_dereference(fobj->shared[i]); + + ret = reservation_object_test_signaled_single(fence); + if (ret < 0) + goto retry; + else if (!ret) + break; + } + + if (read_seqcount_retry(&obj->seq, seq)) + goto retry; + } + + if (!shared_count) { + struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); + + if (fence_excl) { + ret = reservation_object_test_signaled_single( + fence_excl); + if (ret < 0) + goto retry; + + if (read_seqcount_retry(&obj->seq, seq)) + goto retry; + } + } + + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu); diff -rupN a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c --- a/drivers/dma-buf/sw_sync.c 2019-09-23 21:45:35.047572519 +0200 +++ b/drivers/dma-buf/sw_sync.c 2019-09-23 21:44:38.807735853 +0200 @@ -132,14 +132,17 @@ static void timeline_fence_release(struc { struct sync_pt *pt = dma_fence_to_sync_pt(fence); struct sync_timeline *parent = dma_fence_parent(fence); - unsigned long flags; - spin_lock_irqsave(fence->lock, flags); if (!list_empty(&pt->link)) { - list_del(&pt->link); - rb_erase(&pt->node, &parent->pt_tree); + unsigned long flags; + + spin_lock_irqsave(fence->lock, flags); + if (!list_empty(&pt->link)) { + list_del(&pt->link); + rb_erase(&pt->node, &parent->pt_tree); + } + spin_unlock_irqrestore(fence->lock, flags); } - spin_unlock_irqrestore(fence->lock, flags); sync_timeline_put(parent); dma_fence_free(fence); @@ -262,8 +265,7 @@ static struct sync_pt *sync_pt_create(st p = &parent->rb_left; } else { if (dma_fence_get_rcu(&other->base)) { - sync_timeline_put(obj); - kfree(pt); + dma_fence_put(&pt->base); pt = other; goto unlock; } diff -rupN a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c --- a/drivers/dma-buf/sync_file.c 2019-09-23 21:45:35.047572519 +0200 +++ b/drivers/dma-buf/sync_file.c 2019-09-23 21:44:38.807735853 +0200 @@ -419,7 +419,7 @@ static long sync_file_ioctl_fence_info(s * info->num_fences. */ if (!info.num_fences) { - info.status = dma_fence_get_status(sync_file->fence); + info.status = dma_fence_is_signaled(sync_file->fence); goto no_fences; } else { info.status = 1; diff -rupN a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c --- a/drivers/gpu/drm/drm_agpsupport.c 2019-09-23 21:45:36.271568815 +0200 +++ b/drivers/gpu/drm/drm_agpsupport.c 2019-09-23 21:44:38.807735853 +0200 @@ -1,4 +1,4 @@ -/* +/** * \file drm_agpsupport.c * DRM support for AGP/GART backend * @@ -465,3 +465,46 @@ void drm_legacy_agp_clear(struct drm_dev dev->agp->acquired = 0; dev->agp->enabled = 0; } + +/** + * Binds a collection of pages into AGP memory at the given offset, returning + * the AGP memory structure containing them. + * + * No reference is held on the pages during this time -- it is up to the + * caller to handle that. 
+ */ +struct agp_memory * +drm_agp_bind_pages(struct drm_device *dev, + struct page **pages, + unsigned long num_pages, + uint32_t gtt_offset, + u32 type) +{ + struct agp_memory *mem; + int ret, i; + + DRM_DEBUG("\n"); + + mem = agp_allocate_memory(dev->agp->bridge, num_pages, + type); + if (mem == NULL) { + DRM_ERROR("Failed to allocate memory for %ld pages\n", + num_pages); + return NULL; + } + + for (i = 0; i < num_pages; i++) + mem->pages[i] = pages[i]; + mem->page_count = num_pages; + + mem->is_flushed = true; + ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE); + if (ret != 0) { + DRM_ERROR("Failed to bind AGP memory: %d\n", ret); + agp_free_memory(mem); + return NULL; + } + + return mem; +} +EXPORT_SYMBOL(drm_agp_bind_pages); diff -rupN a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c --- a/drivers/gpu/drm/drm_atomic_uapi.c 2019-09-23 21:45:36.275568803 +0200 +++ b/drivers/gpu/drm/drm_atomic_uapi.c 2019-09-23 21:44:38.807735853 +0200 @@ -747,8 +747,6 @@ static int drm_atomic_connector_set_prop return -EINVAL; } state->content_protection = val; - } else if (property == config->hdcp_content_type_property) { - state->hdcp_content_type = val; } else if (property == connector->colorspace_property) { state->colorspace = val; } else if (property == config->writeback_fb_id_property) { @@ -833,8 +831,6 @@ drm_atomic_connector_get_property(struct state->hdr_output_metadata->base.id : 0; } else if (property == config->content_protection_property) { *val = state->content_protection; - } else if (property == config->hdcp_content_type_property) { - *val = state->hdcp_content_type; } else if (property == config->writeback_fb_id_property) { /* Writeback framebuffer is one-shot, write and forget */ *val = 0; @@ -1037,7 +1033,7 @@ int drm_atomic_set_property(struct drm_a * As a contrast, with implicit fencing the kernel keeps track of any * ongoing rendering, and automatically ensures that the atomic update waits * for any pending rendering to complete. For shared buffers represented with - * a &struct dma_buf this is tracked in &struct dma_resv. + * a &struct dma_buf this is tracked in &struct reservation_object. * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org), * whereas explicit fencing is what Android wants. * diff -rupN a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c --- a/drivers/gpu/drm/drm_client.c 2019-09-23 21:45:36.275568803 +0200 +++ b/drivers/gpu/drm/drm_client.c 2019-09-23 21:44:38.807735853 +0200 @@ -59,6 +59,7 @@ static void drm_client_close(struct drm_ drm_file_free(client->file); } +EXPORT_SYMBOL(drm_client_close); /** * drm_client_init - Initialise a DRM client diff -rupN a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c --- a/drivers/gpu/drm/drm_connector.c 2019-09-23 21:45:36.275568803 +0200 +++ b/drivers/gpu/drm/drm_connector.c 2019-09-23 21:44:38.807735853 +0200 @@ -92,7 +92,6 @@ static struct drm_conn_prop_enum_list dr { DRM_MODE_CONNECTOR_DSI, "DSI" }, { DRM_MODE_CONNECTOR_DPI, "DPI" }, { DRM_MODE_CONNECTOR_WRITEBACK, "Writeback" }, - { DRM_MODE_CONNECTOR_SPI, "SPI" }, }; void drm_connector_ida_init(void) @@ -141,7 +140,8 @@ static void drm_connector_get_cmdline_mo } DRM_DEBUG_KMS("cmdline mode for connector %s %s %dx%d@%dHz%s%s%s\n", - connector->name, mode->name, + connector->name, + mode->name, mode->xres, mode->yres, mode->refresh_specified ? mode->refresh : 60, mode->rb ? 
" reduced blanking" : "", @@ -298,41 +298,6 @@ out_put: EXPORT_SYMBOL(drm_connector_init); /** - * drm_connector_init_with_ddc - Init a preallocated connector - * @dev: DRM device - * @connector: the connector to init - * @funcs: callbacks for this connector - * @connector_type: user visible type of the connector - * @ddc: pointer to the associated ddc adapter - * - * Initialises a preallocated connector. Connectors should be - * subclassed as part of driver connector objects. - * - * Ensures that the ddc field of the connector is correctly set. - * - * Returns: - * Zero on success, error code on failure. - */ -int drm_connector_init_with_ddc(struct drm_device *dev, - struct drm_connector *connector, - const struct drm_connector_funcs *funcs, - int connector_type, - struct i2c_adapter *ddc) -{ - int ret; - - ret = drm_connector_init(dev, connector, funcs, connector_type); - if (ret) - return ret; - - /* provide ddc symlink in sysfs */ - connector->ddc = ddc; - - return ret; -} -EXPORT_SYMBOL(drm_connector_init_with_ddc); - -/** * drm_connector_attach_edid_property - attach edid property. * @connector: the connector * @@ -983,72 +948,10 @@ static const struct drm_prop_enum_list h * - If the state is DESIRED, kernel should attempt to re-authenticate the * link whenever possible. This includes across disable/enable, dpms, * hotplug, downstream device changes, link status failures, etc.. - * - Kernel sends uevent with the connector id and property id through - * @drm_hdcp_update_content_protection, upon below kernel triggered - * scenarios: - * - * - DESIRED -> ENABLED (authentication success) - * - ENABLED -> DESIRED (termination of authentication) - * - Please note no uevents for userspace triggered property state changes, - * which can't fail such as - * - * - DESIRED/ENABLED -> UNDESIRED - * - UNDESIRED -> DESIRED - * - Userspace is responsible for polling the property or listen to uevents - * to determine when the value transitions from ENABLED to DESIRED. - * This signifies the link is no longer protected and userspace should - * take appropriate action (whatever that might be). - * - * HDCP Content Type: - * This Enum property is used by the userspace to declare the content type - * of the display stream, to kernel. Here display stream stands for any - * display content that userspace intended to display through HDCP - * encryption. - * - * Content Type of a stream is decided by the owner of the stream, as - * "HDCP Type0" or "HDCP Type1". - * - * The value of the property can be one of the below: - * - "HDCP Type0": DRM_MODE_HDCP_CONTENT_TYPE0 = 0 - * - "HDCP Type1": DRM_MODE_HDCP_CONTENT_TYPE1 = 1 - * - * When kernel starts the HDCP authentication (see "Content Protection" - * for details), it uses the content type in "HDCP Content Type" - * for performing the HDCP authentication with the display sink. - * - * Please note in HDCP spec versions, a link can be authenticated with - * HDCP 2.2 for Content Type 0/Content Type 1. Where as a link can be - * authenticated with HDCP1.4 only for Content Type 0(though it is implicit - * in nature. As there is no reference for Content Type in HDCP1.4). - * - * HDCP2.2 authentication protocol itself takes the "Content Type" as a - * parameter, which is a input for the DP HDCP2.2 encryption algo. - * - * In case of Type 0 content protection request, kernel driver can choose - * either of HDCP spec versions 1.4 and 2.2. 
When HDCP2.2 is used for - * "HDCP Type 0", a HDCP 2.2 capable repeater in the downstream can send - * that content to a HDCP 1.4 authenticated HDCP sink (Type0 link). - * But if the content is classified as "HDCP Type 1", above mentioned - * HDCP 2.2 repeater wont send the content to the HDCP sink as it can't - * authenticate the HDCP1.4 capable sink for "HDCP Type 1". - * - * Please note userspace can be ignorant of the HDCP versions used by the - * kernel driver to achieve the "HDCP Content Type". - * - * At current scenario, classifying a content as Type 1 ensures that the - * content will be displayed only through the HDCP2.2 encrypted link. - * - * Note that the HDCP Content Type property is introduced at HDCP 2.2, and - * defaults to type 0. It is only exposed by drivers supporting HDCP 2.2 - * (hence supporting Type 0 and Type 1). Based on how next versions of - * HDCP specs are defined content Type could be used for higher versions - * too. - * - * If content type is changed when "Content Protection" is not UNDESIRED, - * then kernel will disable the HDCP and re-enable with new type in the - * same atomic commit. And when "Content Protection" is ENABLED, it means - * that link is HDCP authenticated and encrypted, for the transmission of - * the Type of stream mentioned at "HDCP Content Type". + * - Userspace is responsible for polling the property to determine when + * the value transitions from ENABLED to DESIRED. This signifies the link + * is no longer protected and userspace should take appropriate action + * (whatever that might be). * * HDR_OUTPUT_METADATA: * Connector property to enable userspace to send HDR Metadata to diff -rupN a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c --- a/drivers/gpu/drm/drm_crtc_helper.c 2019-09-23 21:45:36.275568803 +0200 +++ b/drivers/gpu/drm/drm_crtc_helper.c 2019-09-23 21:44:38.807735853 +0200 @@ -159,10 +159,14 @@ drm_encoder_disable(struct drm_encoder * if (!encoder_funcs) return; + drm_bridge_disable(encoder->bridge); + if (encoder_funcs->disable) (*encoder_funcs->disable)(encoder); else if (encoder_funcs->dpms) (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); + + drm_bridge_post_disable(encoder->bridge); } static void __drm_helper_disable_unused_functions(struct drm_device *dev) @@ -322,6 +326,13 @@ bool drm_crtc_helper_set_mode(struct drm if (!encoder_funcs) continue; + ret = drm_bridge_mode_fixup(encoder->bridge, + mode, adjusted_mode); + if (!ret) { + DRM_DEBUG_KMS("Bridge fixup failed\n"); + goto done; + } + encoder_funcs = encoder->helper_private; if (encoder_funcs->mode_fixup) { if (!(ret = encoder_funcs->mode_fixup(encoder, mode, @@ -353,9 +364,13 @@ bool drm_crtc_helper_set_mode(struct drm if (!encoder_funcs) continue; + drm_bridge_disable(encoder->bridge); + /* Disable the encoders as the first thing we do. */ if (encoder_funcs->prepare) encoder_funcs->prepare(encoder); + + drm_bridge_post_disable(encoder->bridge); } drm_crtc_prepare_encoders(dev); @@ -382,6 +397,8 @@ bool drm_crtc_helper_set_mode(struct drm encoder->base.id, encoder->name, mode->name); if (encoder_funcs->mode_set) encoder_funcs->mode_set(encoder, mode, adjusted_mode); + + drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode); } /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ @@ -396,8 +413,12 @@ bool drm_crtc_helper_set_mode(struct drm if (!encoder_funcs) continue; + drm_bridge_pre_enable(encoder->bridge); + if (encoder_funcs->commit) encoder_funcs->commit(encoder); + + drm_bridge_enable(encoder->bridge); } /* Calculate and store various constants which @@ -796,14 +817,25 @@ static int drm_helper_choose_encoder_dpm /* Helper which handles bridge ordering around encoder dpms */ static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode) { + struct drm_bridge *bridge = encoder->bridge; const struct drm_encoder_helper_funcs *encoder_funcs; encoder_funcs = encoder->helper_private; if (!encoder_funcs) return; + if (mode == DRM_MODE_DPMS_ON) + drm_bridge_pre_enable(bridge); + else + drm_bridge_disable(bridge); + if (encoder_funcs->dpms) encoder_funcs->dpms(encoder, mode); + + if (mode == DRM_MODE_DPMS_ON) + drm_bridge_enable(bridge); + else + drm_bridge_post_disable(bridge); } static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) diff -rupN a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c --- a/drivers/gpu/drm/drm_debugfs_crc.c 2019-09-23 21:45:36.275568803 +0200 +++ b/drivers/gpu/drm/drm_debugfs_crc.c 2019-09-23 21:44:38.807735853 +0200 @@ -66,18 +66,9 @@ * the reported CRCs of frames that should have the same contents. * * On the driver side the implementation effort is minimal, drivers only need to - * implement &drm_crtc_funcs.set_crc_source and &drm_crtc_funcs.verify_crc_source. - * The debugfs files are automatically set up if those vfuncs are set. CRC samples - * need to be captured in the driver by calling drm_crtc_add_crc_entry(). - * Depending on the driver and HW requirements, &drm_crtc_funcs.set_crc_source - * may result in a commit (even a full modeset). - * - * CRC results must be reliable across non-full-modeset atomic commits, so if a - * commit via DRM_IOCTL_MODE_ATOMIC would disable or otherwise interfere with - * CRC generation, then the driver must mark that commit as a full modeset - * (drm_atomic_crtc_needs_modeset() should return true). As a result, to ensure - * consistent results, generic userspace must re-setup CRC generation after a - * legacy SETCRTC or an atomic commit with DRM_MODE_ATOMIC_ALLOW_MODESET. + * implement &drm_crtc_funcs.set_crc_source. The debugfs files are automatically + * set up if that vfunc is set. CRC samples need to be captured in the driver by + * calling drm_crtc_add_crc_entry(). 
*/ static int crc_control_show(struct seq_file *m, void *data) diff -rupN a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c --- a/drivers/gpu/drm/drm_dma.c 2019-09-23 21:45:36.275568803 +0200 +++ b/drivers/gpu/drm/drm_dma.c 2019-09-23 21:44:38.807735853 +0200 @@ -1,4 +1,4 @@ -/* +/** * \file drm_dma.c * DMA IOCTL and function support * diff -rupN a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c --- a/drivers/gpu/drm/drm_dp_aux_dev.c 2019-09-23 21:45:36.275568803 +0200 +++ b/drivers/gpu/drm/drm_dp_aux_dev.c 2019-09-23 21:44:38.807735853 +0200 @@ -37,7 +37,6 @@ #include #include -#include #include #include "drm_crtc_helper_internal.h" @@ -83,7 +82,8 @@ static struct drm_dp_aux_dev *alloc_drm_ kref_init(&aux_dev->refcount); mutex_lock(&aux_idr_mutex); - index = idr_alloc(&aux_idr, aux_dev, 0, DRM_AUX_MINORS, GFP_KERNEL); + index = idr_alloc_cyclic(&aux_idr, aux_dev, 0, DRM_AUX_MINORS, + GFP_KERNEL); mutex_unlock(&aux_idr_mutex); if (index < 0) { kfree(aux_dev); @@ -163,12 +163,7 @@ static ssize_t auxdev_read_iter(struct k break; } - if (aux_dev->aux->is_remote) - res = drm_dp_mst_dpcd_read(aux_dev->aux, pos, buf, - todo); - else - res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo); - + res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo); if (res <= 0) break; @@ -215,12 +210,7 @@ static ssize_t auxdev_write_iter(struct break; } - if (aux_dev->aux->is_remote) - res = drm_dp_mst_dpcd_write(aux_dev->aux, pos, buf, - todo); - else - res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo); - + res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo); if (res <= 0) break; diff -rupN a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c --- a/drivers/gpu/drm/drm_dp_helper.c 2019-09-23 21:45:36.275568803 +0200 +++ b/drivers/gpu/drm/drm_dp_helper.c 2019-09-23 21:44:38.807735853 +0200 @@ -152,15 +152,38 @@ EXPORT_SYMBOL(drm_dp_link_train_channel_ u8 drm_dp_link_rate_to_bw_code(int link_rate) { - /* Spec says link_bw = link_rate / 0.27Gbps */ - return link_rate / 27000; + switch (link_rate) { + default: + WARN(1, "unknown DP link rate %d, using %x\n", link_rate, + DP_LINK_BW_1_62); + /* fall through */ + case 162000: + return DP_LINK_BW_1_62; + case 270000: + return DP_LINK_BW_2_7; + case 540000: + return DP_LINK_BW_5_4; + case 810000: + return DP_LINK_BW_8_1; + } } EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code); int drm_dp_bw_code_to_link_rate(u8 link_bw) { - /* Spec says link_rate = link_bw * 0.27Gbps */ - return link_bw * 27000; + switch (link_bw) { + default: + WARN(1, "unknown DP link BW code %x, using 162000\n", link_bw); + /* fall through */ + case DP_LINK_BW_1_62: + return 162000; + case DP_LINK_BW_2_7: + return 270000; + case DP_LINK_BW_5_4: + return 540000; + case DP_LINK_BW_8_1: + return 810000; + } } EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate); diff -rupN a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c --- a/drivers/gpu/drm/drm_dp_mst_topology.c 2019-09-23 21:45:36.275568803 +0200 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c 2019-09-23 21:44:38.811735842 +0200 @@ -36,8 +36,6 @@ #include #include -#include "drm_crtc_helper_internal.h" - /** * DOC: dp mst helper * @@ -55,9 +53,6 @@ static int drm_dp_dpcd_write_payload(str int id, struct drm_dp_payload *payload); -static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, - int offset, int size, u8 *bytes); static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes); 
@@ -1488,52 +1483,6 @@ static bool drm_dp_port_setup_pdt(struct return send_link; } -/** - * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband - * @aux: Fake sideband AUX CH - * @offset: address of the (first) register to read - * @buffer: buffer to store the register values - * @size: number of bytes in @buffer - * - * Performs the same functionality for remote devices via - * sideband messaging as drm_dp_dpcd_read() does for local - * devices via actual AUX CH. - * - * Return: Number of bytes read, or negative error code on failure. - */ -ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux, - unsigned int offset, void *buffer, size_t size) -{ - struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, - aux); - - return drm_dp_send_dpcd_read(port->mgr, port, - offset, size, buffer); -} - -/** - * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband - * @aux: Fake sideband AUX CH - * @offset: address of the (first) register to write - * @buffer: buffer containing the values to write - * @size: number of bytes in @buffer - * - * Performs the same functionality for remote devices via - * sideband messaging as drm_dp_dpcd_write() does for local - * devices via actual AUX CH. - * - * Return: 0 on success, negative error code on failure. - */ -ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux, - unsigned int offset, void *buffer, size_t size) -{ - struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, - aux); - - return drm_dp_send_dpcd_write(port->mgr, port, - offset, size, buffer); -} - static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid) { int ret; @@ -1577,46 +1526,6 @@ static void build_mst_prop_path(const st strlcat(proppath, temp, proppath_size); } -/** - * drm_dp_mst_connector_late_register() - Late MST connector registration - * @connector: The MST connector - * @port: The MST port for this connector - * - * Helper to register the remote aux device for this MST port. Drivers should - * call this from their mst connector's late_register hook to enable MST aux - * devices. - * - * Return: 0 on success, negative error code on failure. - */ -int drm_dp_mst_connector_late_register(struct drm_connector *connector, - struct drm_dp_mst_port *port) -{ - DRM_DEBUG_KMS("registering %s remote bus for %s\n", - port->aux.name, connector->kdev->kobj.name); - - port->aux.dev = connector->kdev; - return drm_dp_aux_register_devnode(&port->aux); -} -EXPORT_SYMBOL(drm_dp_mst_connector_late_register); - -/** - * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration - * @connector: The MST connector - * @port: The MST port for this connector - * - * Helper to unregister the remote aux device for this MST port, registered by - * drm_dp_mst_connector_late_register(). Drivers should call this from their mst - * connector's early_unregister hook. 
- */ -void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, - struct drm_dp_mst_port *port) -{ - DRM_DEBUG_KMS("unregistering %s remote bus for %s\n", - port->aux.name, connector->kdev->kobj.name); - drm_dp_aux_unregister_devnode(&port->aux); -} -EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister); - static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, struct drm_device *dev, struct drm_dp_link_addr_reply_port *port_msg) @@ -1639,7 +1548,6 @@ static void drm_dp_add_port(struct drm_d port->mgr = mstb->mgr; port->aux.name = "DPMST"; port->aux.dev = dev->dev; - port->aux.is_remote = true; /* * Make sure the memory allocation for our parent branch stays @@ -1908,6 +1816,7 @@ static bool drm_dp_validate_guid(struct return false; } +#if 0 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes) { struct drm_dp_sideband_msg_req_body req; @@ -1920,6 +1829,7 @@ static int build_dpcd_read(struct drm_dp return 0; } +#endif static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, u8 *msg, int len) @@ -2531,58 +2441,26 @@ int drm_dp_update_payload_part2(struct d } EXPORT_SYMBOL(drm_dp_update_payload_part2); +#if 0 /* unused as of yet */ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, - int offset, int size, u8 *bytes) + int offset, int size) { int len; - int ret = 0; struct drm_dp_sideband_msg_tx *txmsg; - struct drm_dp_mst_branch *mstb; - - mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); - if (!mstb) - return -EINVAL; txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); - if (!txmsg) { - ret = -ENOMEM; - goto fail_put; - } + if (!txmsg) + return -ENOMEM; - len = build_dpcd_read(txmsg, port->port_num, offset, size); + len = build_dpcd_read(txmsg, port->port_num, 0, 8); txmsg->dst = port->parent; drm_dp_queue_down_tx(mgr, txmsg); - ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); - if (ret < 0) - goto fail_free; - - /* DPCD read should never be NACKed */ - if (txmsg->reply.reply_type == 1) { - DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n", - mstb, port->port_num, offset, size); - ret = -EIO; - goto fail_free; - } - - if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) { - ret = -EPROTO; - goto fail_free; - } - - ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes, - size); - memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret); - -fail_free: - kfree(txmsg); -fail_put: - drm_dp_mst_topology_put_mstb(mstb); - - return ret; + return 0; } +#endif static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, @@ -2611,7 +2489,7 @@ static int drm_dp_send_dpcd_write(struct ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); if (ret > 0) { if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) - ret = -EIO; + ret = -EINVAL; else ret = 0; } diff -rupN a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c --- a/drivers/gpu/drm/drm_drv.c 2019-09-23 21:45:36.275568803 +0200 +++ b/drivers/gpu/drm/drm_drv.c 2019-09-23 21:44:38.811735842 +0200 @@ -328,9 +328,11 @@ void drm_minor_release(struct drm_minor * struct drm_device *drm; * int ret; * - * // devm_kzalloc() can't be used here because the drm_device ' - * // lifetime can exceed the device lifetime if driver unbind - * // happens when userspace still has open file descriptors. 
+ * [ + * devm_kzalloc() can't be used here because the drm_device + * lifetime can exceed the device lifetime if driver unbind + * happens when userspace still has open file descriptors. + * ] * priv = kzalloc(sizeof(*priv), GFP_KERNEL); * if (!priv) * return -ENOMEM; @@ -353,7 +355,7 @@ void drm_minor_release(struct drm_minor * if (IS_ERR(priv->pclk)) * return PTR_ERR(priv->pclk); * - * // Further setup, display pipeline etc + * [ Further setup, display pipeline etc ] * * platform_set_drvdata(pdev, drm); * @@ -368,7 +370,7 @@ void drm_minor_release(struct drm_minor * return 0; * } * - * // This function is called before the devm_ resources are released + * [ This function is called before the devm_ resources are released ] * static int driver_remove(struct platform_device *pdev) * { * struct drm_device *drm = platform_get_drvdata(pdev); @@ -379,7 +381,7 @@ void drm_minor_release(struct drm_minor * return 0; * } * - * // This function is called on kernel restart and shutdown + * [ This function is called on kernel restart and shutdown ] * static void driver_shutdown(struct platform_device *pdev) * { * drm_atomic_helper_shutdown(platform_get_drvdata(pdev)); @@ -976,14 +978,14 @@ int drm_dev_register(struct drm_device * if (ret) goto err_minors; + dev->registered = true; + if (dev->driver->load) { ret = dev->driver->load(dev, flags); if (ret) goto err_minors; } - dev->registered = true; - if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_modeset_register_all(dev); diff -rupN a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c --- a/drivers/gpu/drm/drm_file.c 2019-09-23 21:45:36.279568791 +0200 +++ b/drivers/gpu/drm/drm_file.c 2019-09-23 21:44:38.811735842 +0200 @@ -147,7 +147,8 @@ struct drm_file *drm_file_alloc(struct d if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) drm_syncobj_open(file); - drm_prime_init_file_private(&file->prime); + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_prime_init_file_private(&file->prime); if (dev->driver->open) { ret = dev->driver->open(dev, file); @@ -158,7 +159,8 @@ struct drm_file *drm_file_alloc(struct d return file; out_prime_destroy: - drm_prime_destroy_file_private(&file->prime); + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_prime_destroy_file_private(&file->prime); if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) drm_syncobj_release(file); if (drm_core_check_feature(dev, DRIVER_GEM)) @@ -251,7 +253,8 @@ void drm_file_free(struct drm_file *file if (dev->driver->postclose) dev->driver->postclose(dev, file); - drm_prime_destroy_file_private(&file->prime); + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_prime_destroy_file_private(&file->prime); WARN_ON(!list_empty(&file->event_list)); diff -rupN a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c --- a/drivers/gpu/drm/drm_gem.c 2019-09-23 21:45:36.279568791 +0200 +++ b/drivers/gpu/drm/drm_gem.c 2019-09-23 21:44:38.811735842 +0200 @@ -39,7 +39,6 @@ #include #include -#include #include #include #include @@ -159,7 +158,7 @@ void drm_gem_private_object_init(struct kref_init(&obj->refcount); obj->handle_count = 0; obj->size = size; - dma_resv_init(&obj->_resv); + reservation_object_init(&obj->_resv); if (!obj->resv) obj->resv = &obj->_resv; @@ -255,7 +254,8 @@ drm_gem_object_release_handle(int id, vo else if (dev->driver->gem_close_object) dev->driver->gem_close_object(obj, file_priv); - drm_gem_remove_prime_handles(obj, file_priv); + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_gem_remove_prime_handles(obj, file_priv); drm_vma_node_revoke(&obj->vma_node, 
file_priv); drm_gem_object_handle_put_unlocked(obj); @@ -633,9 +633,6 @@ void drm_gem_put_pages(struct drm_gem_ob pagevec_init(&pvec); for (i = 0; i < npages; i++) { - if (!pages[i]) - continue; - if (dirty) set_page_dirty(pages[i]); @@ -755,7 +752,7 @@ drm_gem_object_lookup(struct drm_file *f EXPORT_SYMBOL(drm_gem_object_lookup); /** - * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects + * drm_gem_reservation_object_wait - Wait on GEM object's reservation's objects * shared and/or exclusive fences. * @filep: DRM file private date * @handle: userspace handle @@ -767,7 +764,7 @@ EXPORT_SYMBOL(drm_gem_object_lookup); * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or * greater than 0 on success. */ -long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, +long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, bool wait_all, unsigned long timeout) { long ret; @@ -779,7 +776,7 @@ long drm_gem_dma_resv_wait(struct drm_fi return -EINVAL; } - ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all, + ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all, true, timeout); if (ret == 0) ret = -ETIME; @@ -790,7 +787,7 @@ long drm_gem_dma_resv_wait(struct drm_fi return ret; } -EXPORT_SYMBOL(drm_gem_dma_resv_wait); +EXPORT_SYMBOL(drm_gem_reservation_object_wait); /** * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl @@ -956,7 +953,7 @@ drm_gem_object_release(struct drm_gem_ob if (obj->filp) fput(obj->filp); - dma_resv_fini(&obj->_resv); + reservation_object_fini(&obj->_resv); drm_gem_free_mmap_offset(obj); } EXPORT_SYMBOL(drm_gem_object_release); @@ -1291,8 +1288,8 @@ retry: if (contended != -1) { struct drm_gem_object *obj = objs[contended]; - ret = dma_resv_lock_slow_interruptible(obj->resv, - acquire_ctx); + ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock, + acquire_ctx); if (ret) { ww_acquire_done(acquire_ctx); return ret; @@ -1303,16 +1300,16 @@ retry: if (i == contended) continue; - ret = dma_resv_lock_interruptible(objs[i]->resv, - acquire_ctx); + ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock, + acquire_ctx); if (ret) { int j; for (j = 0; j < i; j++) - dma_resv_unlock(objs[j]->resv); + ww_mutex_unlock(&objs[j]->resv->lock); if (contended != -1 && contended >= i) - dma_resv_unlock(objs[contended]->resv); + ww_mutex_unlock(&objs[contended]->resv->lock); if (ret == -EDEADLK) { contended = i; @@ -1337,7 +1334,7 @@ drm_gem_unlock_reservations(struct drm_g int i; for (i = 0; i < count; i++) - dma_resv_unlock(objs[i]->resv); + ww_mutex_unlock(&objs[i]->resv->lock); ww_acquire_fini(acquire_ctx); } @@ -1413,12 +1410,12 @@ int drm_gem_fence_array_add_implicit(str if (!write) { struct dma_fence *fence = - dma_resv_get_excl_rcu(obj->resv); + reservation_object_get_excl_rcu(obj->resv); return drm_gem_fence_array_add(fence_array, fence); } - ret = dma_resv_get_fences_rcu(obj->resv, NULL, + ret = reservation_object_get_fences_rcu(obj->resv, NULL, &fence_count, &fences); if (ret || !fence_count) return ret; diff -rupN a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c 2019-09-23 21:45:36.279568791 +0200 +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c 2019-09-23 21:44:38.811735842 +0200 @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include @@ -271,11 +271,11 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_create_with * @plane: Plane * @state: Plane state the fence will be attached to * - * This function extracts the 
exclusive fence from &drm_gem_object.resv and - * attaches it to plane state for the atomic helper to wait on. This is - * necessary to correctly implement implicit synchronization for any buffers - * shared as a struct &dma_buf. This function can be used as the - * &drm_plane_helper_funcs.prepare_fb callback. + * This function prepares a GEM backed framebuffer for scanout by checking if + * the plane framebuffer has a DMA-BUF attached. If it does, it extracts the + * exclusive fence and attaches it to the plane state for the atomic helper to + * wait on. This function can be used as the &drm_plane_helper_funcs.prepare_fb + * callback. * * There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple * gem based framebuffer drivers which have their buffers always pinned in @@ -287,15 +287,17 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_create_with int drm_gem_fb_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) { - struct drm_gem_object *obj; + struct dma_buf *dma_buf; struct dma_fence *fence; if (!state->fb) return 0; - obj = drm_gem_fb_get_obj(state->fb, 0); - fence = dma_resv_get_excl_rcu(obj->resv); - drm_atomic_set_fence_for_plane(state, fence); + dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf; + if (dma_buf) { + fence = reservation_object_get_excl_rcu(dma_buf->resv); + drm_atomic_set_fence_for_plane(state, fence); + } return 0; } @@ -307,11 +309,10 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb) * @pipe: Simple display pipe * @plane_state: Plane state * - * This function uses drm_gem_fb_prepare_fb() to extract the exclusive fence - * from &drm_gem_object.resv and attaches it to plane state for the atomic - * helper to wait on. This is necessary to correctly implement implicit - * synchronization for any buffers shared as a struct &dma_buf. Drivers can use - * this as their &drm_simple_display_pipe_funcs.prepare_fb callback. + * This function uses drm_gem_fb_prepare_fb() to check if the plane FB has a + * &dma_buf attached, extracts the exclusive fence and attaches it to plane + * state for the atomic helper to wait on. Drivers can use this as their + * &drm_simple_display_pipe_funcs.prepare_fb callback. * * See drm_atomic_set_fence_for_plane() for a discussion of implicit and * explicit fencing in atomic modeset updates. @@ -322,3 +323,46 @@ int drm_gem_fb_simple_display_pipe_prepa return drm_gem_fb_prepare_fb(&pipe->plane, plane_state); } EXPORT_SYMBOL(drm_gem_fb_simple_display_pipe_prepare_fb); + +/** + * drm_gem_fbdev_fb_create - Create a GEM backed &drm_framebuffer for fbdev + * emulation + * @dev: DRM device + * @sizes: fbdev size description + * @pitch_align: Optional pitch alignment + * @obj: GEM object backing the framebuffer + * @funcs: Optional vtable to be used for the new framebuffer object when the + * dirty callback is needed. + * + * This function creates a framebuffer from a &drm_fb_helper_surface_size + * description for use in the &drm_fb_helper_funcs.fb_probe callback. + * + * Returns: + * Pointer to a &drm_framebuffer on success or an error pointer on failure. 
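As a rough illustration of the drm_gem_fbdev_fb_create() helper documented above (its body follows just below), a driver's &drm_fb_helper_funcs.fb_probe callback might wrap a freshly allocated GEM object like this. This is only a sketch: my_gem_create() and the 16-byte pitch alignment are hypothetical, and the usual fbdev info setup is omitted.

#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>

/* hypothetical driver-specific allocator, assumed to exist elsewhere */
static struct drm_gem_object *my_gem_create(struct drm_device *dev, size_t size);

static int my_fbdev_probe(struct drm_fb_helper *helper,
                          struct drm_fb_helper_surface_size *sizes)
{
        struct drm_device *dev = helper->dev;
        struct drm_gem_object *obj;
        struct drm_framebuffer *fb;
        size_t size;

        /* Size the backing object for the requested surface. */
        size = sizes->surface_height * sizes->surface_width *
               DIV_ROUND_UP(sizes->surface_bpp, 8);

        obj = my_gem_create(dev, size);         /* hypothetical */
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        /* Wrap the object in a framebuffer with a 16-byte aligned pitch. */
        fb = drm_gem_fbdev_fb_create(dev, sizes, 16, obj, NULL);
        if (IS_ERR(fb))
                return PTR_ERR(fb);

        helper->fb = fb;
        return 0;
}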
+ */ +struct drm_framebuffer * +drm_gem_fbdev_fb_create(struct drm_device *dev, + struct drm_fb_helper_surface_size *sizes, + unsigned int pitch_align, struct drm_gem_object *obj, + const struct drm_framebuffer_funcs *funcs) +{ + struct drm_mode_fb_cmd2 mode_cmd = { 0 }; + + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + mode_cmd.pitches[0] = sizes->surface_width * + DIV_ROUND_UP(sizes->surface_bpp, 8); + if (pitch_align) + mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], + pitch_align); + mode_cmd.pixel_format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, + sizes->surface_depth); + if (obj->size < mode_cmd.pitches[0] * mode_cmd.height) + return ERR_PTR(-EINVAL); + + if (!funcs) + funcs = &drm_gem_fb_funcs; + + return drm_gem_fb_alloc(dev, &mode_cmd, &obj, 1, funcs); +} +EXPORT_SYMBOL(drm_gem_fbdev_fb_create); diff -rupN a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c --- a/drivers/gpu/drm/drm_gem_shmem_helper.c 2019-09-23 21:45:36.279568791 +0200 +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c 2019-09-23 21:44:38.811735842 +0200 @@ -10,7 +10,6 @@ #include #include -#include #include #include #include @@ -75,7 +74,6 @@ struct drm_gem_shmem_object *drm_gem_shm shmem = to_drm_gem_shmem_obj(obj); mutex_init(&shmem->pages_lock); mutex_init(&shmem->vmap_lock); - INIT_LIST_HEAD(&shmem->madv_list); /* * Our buffers are kept pinned, so allocating them @@ -119,11 +117,11 @@ void drm_gem_shmem_free_object(struct dr if (shmem->sgt) { dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl, shmem->sgt->nents, DMA_BIDIRECTIONAL); + + drm_gem_shmem_put_pages(shmem); sg_free_table(shmem->sgt); kfree(shmem->sgt); } - if (shmem->pages) - drm_gem_shmem_put_pages(shmem); } WARN_ON(shmem->pages_use_count); @@ -363,71 +361,6 @@ drm_gem_shmem_create_with_handle(struct } EXPORT_SYMBOL(drm_gem_shmem_create_with_handle); -/* Update madvise status, returns true if not purged, else - * false or -errno. - */ -int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv) -{ - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - - mutex_lock(&shmem->pages_lock); - - if (shmem->madv >= 0) - shmem->madv = madv; - - madv = shmem->madv; - - mutex_unlock(&shmem->pages_lock); - - return (madv >= 0); -} -EXPORT_SYMBOL(drm_gem_shmem_madvise); - -void drm_gem_shmem_purge_locked(struct drm_gem_object *obj) -{ - struct drm_device *dev = obj->dev; - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - - WARN_ON(!drm_gem_shmem_is_purgeable(shmem)); - - dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl, - shmem->sgt->nents, DMA_BIDIRECTIONAL); - sg_free_table(shmem->sgt); - kfree(shmem->sgt); - shmem->sgt = NULL; - - drm_gem_shmem_put_pages_locked(shmem); - - shmem->madv = -1; - - drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); - drm_gem_free_mmap_offset(obj); - - /* Our goal here is to return as much of the memory as - * is possible back to the system as we are called from OOM. - * To do this we must instruct the shmfs to drop all of its - * backing pages, *now*. 
- */ - shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1); - - invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, - 0, (loff_t)-1); -} -EXPORT_SYMBOL(drm_gem_shmem_purge_locked); - -bool drm_gem_shmem_purge(struct drm_gem_object *obj) -{ - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - - if (!mutex_trylock(&shmem->pages_lock)) - return false; - drm_gem_shmem_purge_locked(obj); - mutex_unlock(&shmem->pages_lock); - - return true; -} -EXPORT_SYMBOL(drm_gem_shmem_purge); - /** * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object * @file: DRM file structure to create the dumb buffer for diff -rupN a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c --- a/drivers/gpu/drm/drm_gem_vram_helper.c 2019-09-23 21:45:36.279568791 +0200 +++ b/drivers/gpu/drm/drm_gem_vram_helper.c 2019-09-23 21:44:38.811735842 +0200 @@ -7,8 +7,6 @@ #include #include -static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; - /** * DOC: overview * @@ -26,7 +24,7 @@ static void drm_gem_vram_cleanup(struct * TTM buffer object in 'bo' has already been cleaned * up; only release the GEM object. */ - drm_gem_object_release(&gbo->bo.base); + drm_gem_object_release(&gbo->gem); } static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo) @@ -82,10 +80,7 @@ static int drm_gem_vram_init(struct drm_ int ret; size_t acc_size; - if (!gbo->bo.base.funcs) - gbo->bo.base.funcs = &drm_gem_vram_object_funcs; - - ret = drm_gem_object_init(dev, &gbo->bo.base, size); + ret = drm_gem_object_init(dev, &gbo->gem, size); if (ret) return ret; @@ -103,7 +98,7 @@ static int drm_gem_vram_init(struct drm_ return 0; err_drm_gem_object_release: - drm_gem_object_release(&gbo->bo.base); + drm_gem_object_release(&gbo->gem); return ret; } @@ -168,7 +163,7 @@ EXPORT_SYMBOL(drm_gem_vram_put); */ u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo) { - return drm_vma_node_offset_addr(&gbo->bo.base.vma_node); + return drm_vma_node_offset_addr(&gbo->bo.vma_node); } EXPORT_SYMBOL(drm_gem_vram_mmap_offset); @@ -383,11 +378,11 @@ int drm_gem_vram_fill_create_dumb(struct if (IS_ERR(gbo)) return PTR_ERR(gbo); - ret = drm_gem_handle_create(file, &gbo->bo.base, &handle); + ret = drm_gem_handle_create(file, &gbo->gem, &handle); if (ret) goto err_drm_gem_object_put_unlocked; - drm_gem_object_put_unlocked(&gbo->bo.base); + drm_gem_object_put_unlocked(&gbo->gem); args->pitch = pitch; args->size = size; @@ -396,7 +391,7 @@ int drm_gem_vram_fill_create_dumb(struct return 0; err_drm_gem_object_put_unlocked: - drm_gem_object_put_unlocked(&gbo->bo.base); + drm_gem_object_put_unlocked(&gbo->gem); return ret; } EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb); @@ -446,7 +441,7 @@ int drm_gem_vram_bo_driver_verify_access { struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo); - return drm_vma_node_verify_access(&gbo->bo.base.vma_node, + return drm_vma_node_verify_access(&gbo->gem.vma_node, filp->private_data); } EXPORT_SYMBOL(drm_gem_vram_bo_driver_verify_access); @@ -465,24 +460,21 @@ const struct drm_vram_mm_funcs drm_gem_v EXPORT_SYMBOL(drm_gem_vram_mm_funcs); /* - * Helpers for struct drm_gem_object_funcs + * Helpers for struct drm_driver */ /** - * drm_gem_vram_object_free() - \ - Implements &struct drm_gem_object_funcs.free - * @gem: GEM object. Refers to &struct drm_gem_vram_object.gem + * drm_gem_vram_driver_gem_free_object_unlocked() - \ + Implements &struct drm_driver.gem_free_object_unlocked + * @gem: GEM object. 
Refers to &struct drm_gem_vram_object.gem */ -static void drm_gem_vram_object_free(struct drm_gem_object *gem) +void drm_gem_vram_driver_gem_free_object_unlocked(struct drm_gem_object *gem) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); drm_gem_vram_put(gbo); } - -/* - * Helpers for dump buffers - */ +EXPORT_SYMBOL(drm_gem_vram_driver_gem_free_object_unlocked); /** * drm_gem_vram_driver_create_dumb() - \ @@ -544,19 +536,19 @@ int drm_gem_vram_driver_dumb_mmap_offset EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset); /* - * PRIME helpers + * PRIME helpers for struct drm_driver */ /** - * drm_gem_vram_object_pin() - \ - Implements &struct drm_gem_object_funcs.pin + * drm_gem_vram_driver_gem_prime_pin() - \ + Implements &struct drm_driver.gem_prime_pin * @gem: The GEM object to pin * * Returns: * 0 on success, or * a negative errno code otherwise. */ -static int drm_gem_vram_object_pin(struct drm_gem_object *gem) +int drm_gem_vram_driver_gem_prime_pin(struct drm_gem_object *gem) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); @@ -570,29 +562,31 @@ static int drm_gem_vram_object_pin(struc */ return drm_gem_vram_pin(gbo, 0); } +EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_pin); /** - * drm_gem_vram_object_unpin() - \ - Implements &struct drm_gem_object_funcs.unpin + * drm_gem_vram_driver_gem_prime_unpin() - \ + Implements &struct drm_driver.gem_prime_unpin * @gem: The GEM object to unpin */ -static void drm_gem_vram_object_unpin(struct drm_gem_object *gem) +void drm_gem_vram_driver_gem_prime_unpin(struct drm_gem_object *gem) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); drm_gem_vram_unpin(gbo); } +EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_unpin); /** - * drm_gem_vram_object_vmap() - \ - Implements &struct drm_gem_object_funcs.vmap + * drm_gem_vram_driver_gem_prime_vmap() - \ + Implements &struct drm_driver.gem_prime_vmap * @gem: The GEM object to map * * Returns: * The buffers virtual address on success, or * NULL otherwise. */ -static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem) +void *drm_gem_vram_driver_gem_prime_vmap(struct drm_gem_object *gem) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); int ret; @@ -608,30 +602,40 @@ static void *drm_gem_vram_object_vmap(st } return base; } +EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_vmap); /** - * drm_gem_vram_object_vunmap() - \ - Implements &struct drm_gem_object_funcs.vunmap + * drm_gem_vram_driver_gem_prime_vunmap() - \ + Implements &struct drm_driver.gem_prime_vunmap * @gem: The GEM object to unmap * @vaddr: The mapping's base address */ -static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, - void *vaddr) +void drm_gem_vram_driver_gem_prime_vunmap(struct drm_gem_object *gem, + void *vaddr) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); drm_gem_vram_kunmap(gbo); drm_gem_vram_unpin(gbo); } +EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_vunmap); -/* - * GEM object funcs +/** + * drm_gem_vram_driver_gem_prime_mmap() - \ + Implements &struct drm_driver.gem_prime_mmap + * @gem: The GEM object to map + * @vma: The VMA describing the mapping + * + * Returns: + * 0 on success, or + * a negative errno code otherwise. 
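Taken together, the VRAM-helper callbacks restored here (definitions above and just below) slot into &struct drm_driver rather than &struct drm_gem_object_funcs. A sketch of the wiring, assuming a drm_driver of that era; my_vram_driver_sketch is hypothetical and fops, name and the modeset hooks are omitted:

#include <drm/drm_drv.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_prime.h>

static struct drm_driver my_vram_driver_sketch = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
        /* buffer management through the VRAM helpers */
        .gem_free_object_unlocked =
                drm_gem_vram_driver_gem_free_object_unlocked,
        .dumb_map_offset        = drm_gem_vram_driver_dumb_mmap_offset,
        /* PRIME export/import plumbing */
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_export       = drm_gem_prime_export,
        .gem_prime_import       = drm_gem_prime_import,
        .gem_prime_pin          = drm_gem_vram_driver_gem_prime_pin,
        .gem_prime_unpin        = drm_gem_vram_driver_gem_prime_unpin,
        .gem_prime_vmap         = drm_gem_vram_driver_gem_prime_vmap,
        .gem_prime_vunmap       = drm_gem_vram_driver_gem_prime_vunmap,
        .gem_prime_mmap         = drm_gem_vram_driver_gem_prime_mmap,
};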
*/ +int drm_gem_vram_driver_gem_prime_mmap(struct drm_gem_object *gem, + struct vm_area_struct *vma) +{ + struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); -static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = { - .free = drm_gem_vram_object_free, - .pin = drm_gem_vram_object_pin, - .unpin = drm_gem_vram_object_unpin, - .vmap = drm_gem_vram_object_vmap, - .vunmap = drm_gem_vram_object_vunmap -}; + gbo->gem.vma_node.vm_node.start = gbo->bo.vma_node.vm_node.start; + return drm_gem_prime_mmap(gem, vma); +} +EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_mmap); diff -rupN a/drivers/gpu/drm/drm_hdcp.c b/drivers/gpu/drm/drm_hdcp.c --- a/drivers/gpu/drm/drm_hdcp.c 2019-09-23 21:45:36.279568791 +0200 +++ b/drivers/gpu/drm/drm_hdcp.c 2019-09-23 21:44:38.811735842 +0200 @@ -271,13 +271,6 @@ exit: * * SRM should be presented in the name of "display_hdcp_srm.bin". * - * Format of the SRM table, that userspace needs to write into the binary file, - * is defined at: - * 1. Renewability chapter on 55th page of HDCP 1.4 specification - * https://www.digital-cp.com/sites/default/files/specifications/HDCP%20Specification%20Rev1_4_Secure.pdf - * 2. Renewability chapter on 63rd page of HDCP 2.2 specification - * https://www.digital-cp.com/sites/default/files/specifications/HDCP%20on%20HDMI%20Specification%20Rev2_2_Final1.pdf - * * Returns: * TRUE on any of the KSV is revoked, else FALSE. */ @@ -351,45 +344,23 @@ static struct drm_prop_enum_list drm_cp_ }; DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list) -static struct drm_prop_enum_list drm_hdcp_content_type_enum_list[] = { - { DRM_MODE_HDCP_CONTENT_TYPE0, "HDCP Type0" }, - { DRM_MODE_HDCP_CONTENT_TYPE1, "HDCP Type1" }, -}; -DRM_ENUM_NAME_FN(drm_get_hdcp_content_type_name, - drm_hdcp_content_type_enum_list) - /** * drm_connector_attach_content_protection_property - attach content protection * property * * @connector: connector to attach CP property on. - * @hdcp_content_type: is HDCP Content Type property needed for connector * * This is used to add support for content protection on select connectors. * Content Protection is intentionally vague to allow for different underlying * technologies, however it is most implemented by HDCP. * - * When hdcp_content_type is true enum property called HDCP Content Type is - * created (if it is not already) and attached to the connector. - * - * This property is used for sending the protected content's stream type - * from userspace to kernel on selected connectors. Protected content provider - * will decide their type of their content and declare the same to kernel. - * - * Content type will be used during the HDCP 2.2 authentication. - * Content type will be set to &drm_connector_state.hdcp_content_type. - * * The content protection will be set to &drm_connector_state.content_protection * - * When kernel triggered content protection state change like DESIRED->ENABLED - * and ENABLED->DESIRED, will use drm_hdcp_update_content_protection() to update - * the content protection state of a connector. - * * Returns: * Zero on success, negative errno on failure. 
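For instance, a connector that supports HDCP would enable the property from its init path. The sketch below is a hypothetical caller using the single-argument form that this hunk keeps (see the signature change just below); my_connector_init() is illustrative only.

#include <drm/drm_connector.h>
#include <drm/drm_hdcp.h>

static int my_connector_init(struct drm_connector *connector)
{
        int ret;

        /* Expose the "Content Protection" enum property to userspace. */
        ret = drm_connector_attach_content_protection_property(connector);
        if (ret)
                return ret;

        /*
         * The property starts out as UNDESIRED; userspace moves it to
         * DESIRED and the driver flips it to ENABLED once HDCP is up.
         */
        return 0;
}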
*/ int drm_connector_attach_content_protection_property( - struct drm_connector *connector, bool hdcp_content_type) + struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_property *prop = @@ -406,52 +377,6 @@ int drm_connector_attach_content_protect DRM_MODE_CONTENT_PROTECTION_UNDESIRED); dev->mode_config.content_protection_property = prop; - if (!hdcp_content_type) - return 0; - - prop = dev->mode_config.hdcp_content_type_property; - if (!prop) - prop = drm_property_create_enum(dev, 0, "HDCP Content Type", - drm_hdcp_content_type_enum_list, - ARRAY_SIZE( - drm_hdcp_content_type_enum_list)); - if (!prop) - return -ENOMEM; - - drm_object_attach_property(&connector->base, prop, - DRM_MODE_HDCP_CONTENT_TYPE0); - dev->mode_config.hdcp_content_type_property = prop; - return 0; } EXPORT_SYMBOL(drm_connector_attach_content_protection_property); - -/** - * drm_hdcp_update_content_protection - Updates the content protection state - * of a connector - * - * @connector: drm_connector on which content protection state needs an update - * @val: New state of the content protection property - * - * This function can be used by display drivers, to update the kernel triggered - * content protection state changes of a drm_connector such as DESIRED->ENABLED - * and ENABLED->DESIRED. No uevent for DESIRED->UNDESIRED or ENABLED->UNDESIRED, - * as userspace is triggering such state change and kernel performs it without - * fail.This function update the new state of the property into the connector's - * state and generate an uevent to notify the userspace. - */ -void drm_hdcp_update_content_protection(struct drm_connector *connector, - u64 val) -{ - struct drm_device *dev = connector->dev; - struct drm_connector_state *state = connector->state; - - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); - if (state->content_protection == val) - return; - - state->content_protection = val; - drm_sysfs_connector_status_event(connector, - dev->mode_config.content_protection_property); -} -EXPORT_SYMBOL(drm_hdcp_update_content_protection); diff -rupN a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c --- a/drivers/gpu/drm/drm_ioc32.c 2019-09-23 21:45:36.279568791 +0200 +++ b/drivers/gpu/drm/drm_ioc32.c 2019-09-23 21:44:38.811735842 +0200 @@ -108,7 +108,7 @@ static int compat_drm_version(struct fil .desc = compat_ptr(v32.desc), }; err = drm_ioctl_kernel(file, drm_version, &v, - DRM_RENDER_ALLOW); + DRM_UNLOCKED|DRM_RENDER_ALLOW); if (err) return err; @@ -142,7 +142,7 @@ static int compat_drm_getunique(struct f .unique = compat_ptr(uq32.unique), }; - err = drm_ioctl_kernel(file, drm_getunique, &uq, 0); + err = drm_ioctl_kernel(file, drm_getunique, &uq, DRM_UNLOCKED); if (err) return err; @@ -181,7 +181,7 @@ static int compat_drm_getmap(struct file return -EFAULT; map.offset = m32.offset; - err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, 0); + err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, DRM_UNLOCKED); if (err) return err; @@ -267,7 +267,7 @@ static int compat_drm_getclient(struct f client.idx = c32.idx; - err = drm_ioctl_kernel(file, drm_getclient, &client, 0); + err = drm_ioctl_kernel(file, drm_getclient, &client, DRM_UNLOCKED); if (err) return err; @@ -297,7 +297,7 @@ static int compat_drm_getstats(struct fi drm_stats32_t __user *argp = (void __user *)arg; int err; - err = drm_ioctl_kernel(file, drm_noop, NULL, 0); + err = drm_ioctl_kernel(file, drm_noop, NULL, DRM_UNLOCKED); if (err) return err; @@ -895,7 +895,8 @@ static int 
compat_drm_mode_addfb2(struct sizeof(req64.modifier))) return -EFAULT; - err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64, 0); + err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64, + DRM_UNLOCKED); if (err) return err; diff -rupN a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c --- a/drivers/gpu/drm/drm_ioctl.c 2019-09-23 21:45:36.279568791 +0200 +++ b/drivers/gpu/drm/drm_ioctl.c 2019-09-23 21:44:38.811735842 +0200 @@ -570,23 +570,24 @@ EXPORT_SYMBOL(drm_ioctl_permit); /* Ioctl table */ static const struct drm_ioctl_desc drm_ioctls[] = { - DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), - DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), + DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0), + DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0), - DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0), - DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0), - DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_UNLOCKED | DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_UNLOCKED|DRM_MASTER), DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH), @@ -594,8 +595,8 @@ static const struct drm_ioctl_desc drm_i DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY), DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY), DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -641,74 +642,74 @@ static const struct drm_ioctl_desc drm_i DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), - - 
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, 0), - - DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), + + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_UNLOCKED), + + DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, 
DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_CREATE, drm_syncobj_create_ioctl, - DRM_RENDER_ALLOW), + DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_DESTROY, drm_syncobj_destroy_ioctl, - DRM_RENDER_ALLOW), + DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, drm_syncobj_handle_to_fd_ioctl, - DRM_RENDER_ALLOW), + DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl, - DRM_RENDER_ALLOW), + DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TRANSFER, drm_syncobj_transfer_ioctl, - DRM_RENDER_ALLOW), + DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl, - DRM_RENDER_ALLOW), + DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl, - DRM_RENDER_ALLOW), + DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl, - DRM_RENDER_ALLOW), + DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl, - DRM_RENDER_ALLOW), + DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, drm_syncobj_timeline_signal_ioctl, - DRM_RENDER_ALLOW), + DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_QUERY, drm_syncobj_query_ioctl, - DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, 0), - 
DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER), + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) @@ -776,7 +777,7 @@ long drm_ioctl_kernel(struct file *file, return retcode; /* Enforce sane locking for modern driver ioctls. */ - if (likely(!drm_core_check_feature(dev, DRIVER_LEGACY)) || + if (!drm_core_check_feature(dev, DRIVER_LEGACY) || (flags & DRM_UNLOCKED)) retcode = func(dev, kdata, file_priv); else { diff -rupN a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c --- a/drivers/gpu/drm/drm_kms_helper_common.c 2019-09-23 21:45:36.279568791 +0200 +++ b/drivers/gpu/drm/drm_kms_helper_common.c 2019-09-23 21:44:38.811735842 +0200 @@ -40,7 +40,7 @@ MODULE_LICENSE("GPL and additional right /* Backward compatibility for drm_kms_helper.edid_firmware */ static int edid_firmware_set(const char *val, const struct kernel_param *kp) { - DRM_NOTE("drm_kms_helper.edid_firmware is deprecated, please use drm.edid_firmware instead.\n"); + DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware instead.\n"); return __drm_set_edid_firmware_path(val); } diff -rupN a/drivers/gpu/drm/drm_legacy_misc.c b/drivers/gpu/drm/drm_legacy_misc.c --- a/drivers/gpu/drm/drm_legacy_misc.c 2019-09-23 21:45:36.279568791 +0200 +++ b/drivers/gpu/drm/drm_legacy_misc.c 2019-09-23 21:44:38.811735842 +0200 @@ -1,4 +1,4 @@ -/* +/** * \file drm_legacy_misc.c * Misc legacy support functions. 
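The locking rule enforced in drm_ioctl_kernel() above reduces to: take the global lock only for legacy drivers whose ioctl is not marked DRM_UNLOCKED. Expressed as a small illustrative predicate (my_ioctl_needs_global_lock() is not a real kernel helper):

#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>

static bool my_ioctl_needs_global_lock(struct drm_device *dev, u32 flags)
{
        /* Modern (non-legacy) drivers never take drm_global_mutex. */
        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return false;

        /* Legacy drivers skip it only for ioctls flagged DRM_UNLOCKED. */
        return !(flags & DRM_UNLOCKED);
}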
* diff -rupN a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c --- a/drivers/gpu/drm/drm_lock.c 2019-09-23 21:45:36.279568791 +0200 +++ b/drivers/gpu/drm/drm_lock.c 2019-09-23 21:44:38.811735842 +0200 @@ -1,4 +1,4 @@ -/* +/** * \file drm_lock.c * IOCTLs for locking * diff -rupN a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c --- a/drivers/gpu/drm/drm_memory.c 2019-09-23 21:45:36.279568791 +0200 +++ b/drivers/gpu/drm/drm_memory.c 2019-09-23 21:44:38.811735842 +0200 @@ -1,4 +1,4 @@ -/* +/** * \file drm_memory.c * Memory management wrappers for DRM * diff -rupN a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c --- a/drivers/gpu/drm/drm_mipi_dsi.c 2019-09-23 21:45:36.279568791 +0200 +++ b/drivers/gpu/drm/drm_mipi_dsi.c 2019-09-23 21:44:38.815735831 +0200 @@ -93,6 +93,11 @@ static struct bus_type mipi_dsi_bus_type .pm = &mipi_dsi_device_pm_ops, }; +static int of_device_match(struct device *dev, const void *data) +{ + return dev->of_node == data; +} + /** * of_find_mipi_dsi_device_by_node() - find the MIPI DSI device matching a * device tree node @@ -105,7 +110,7 @@ struct mipi_dsi_device *of_find_mipi_dsi { struct device *dev; - dev = bus_find_device_by_of_node(&mipi_dsi_bus_type, np); + dev = bus_find_device(&mipi_dsi_bus_type, NULL, np, of_device_match); return dev ? to_mipi_dsi_device(dev) : NULL; } diff -rupN a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c --- a/drivers/gpu/drm/drm_mm.c 2019-09-23 21:45:36.279568791 +0200 +++ b/drivers/gpu/drm/drm_mm.c 2019-09-23 21:44:38.815735831 +0200 @@ -472,7 +472,7 @@ int drm_mm_insert_node_in_range(struct d u64 remainder_mask; bool once; - DRM_MM_BUG_ON(range_start > range_end); + DRM_MM_BUG_ON(range_start >= range_end); if (unlikely(size == 0 || range_end - range_start < size)) return -ENOSPC; diff -rupN a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c --- a/drivers/gpu/drm/drm_mode_object.c 2019-09-23 21:45:36.283568778 +0200 +++ b/drivers/gpu/drm/drm_mode_object.c 2019-09-23 21:44:38.815735831 +0200 @@ -42,8 +42,6 @@ int __drm_mode_object_add(struct drm_dev { int ret; - WARN_ON(dev->registered && !obj_free_cb); - mutex_lock(&dev->mode_config.idr_mutex); ret = idr_alloc(&dev->mode_config.object_idr, register_obj ? 
obj : NULL, 1, 0, GFP_KERNEL); @@ -104,8 +102,6 @@ void drm_mode_object_register(struct drm void drm_mode_object_unregister(struct drm_device *dev, struct drm_mode_object *object) { - WARN_ON(dev->registered && !object->free_cb); - mutex_lock(&dev->mode_config.idr_mutex); if (object->id) { idr_remove(&dev->mode_config.object_idr, object->id); diff -rupN a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c --- a/drivers/gpu/drm/drm_modes.c 2019-09-23 21:45:36.283568778 +0200 +++ b/drivers/gpu/drm/drm_modes.c 2019-09-23 21:44:38.815735831 +0200 @@ -1956,11 +1956,8 @@ void drm_mode_convert_to_umode(struct dr case HDMI_PICTURE_ASPECT_256_135: out->flags |= DRM_MODE_FLAG_PIC_AR_256_135; break; + case HDMI_PICTURE_ASPECT_RESERVED: default: - WARN(1, "Invalid aspect ratio (0%x) on mode\n", - in->picture_aspect_ratio); - /* fall through */ - case HDMI_PICTURE_ASPECT_NONE: out->flags |= DRM_MODE_FLAG_PIC_AR_NONE; break; } @@ -2019,22 +2016,20 @@ int drm_mode_convert_umode(struct drm_de switch (in->flags & DRM_MODE_FLAG_PIC_AR_MASK) { case DRM_MODE_FLAG_PIC_AR_4_3: - out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3; + out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_4_3; break; case DRM_MODE_FLAG_PIC_AR_16_9: - out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9; + out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_16_9; break; case DRM_MODE_FLAG_PIC_AR_64_27: - out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27; + out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_64_27; break; case DRM_MODE_FLAG_PIC_AR_256_135: - out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135; + out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_256_135; break; - case DRM_MODE_FLAG_PIC_AR_NONE: + default: out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; break; - default: - return -EINVAL; } out->status = drm_mode_validate_driver(dev, out); diff -rupN a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c --- a/drivers/gpu/drm/drm_panel.c 2019-09-23 21:45:36.283568778 +0200 +++ b/drivers/gpu/drm/drm_panel.c 2019-09-23 21:44:38.815735831 +0200 @@ -123,109 +123,17 @@ EXPORT_SYMBOL(drm_panel_attach); * * This function should not be called by the panel device itself. It * is only for the drm device that called drm_panel_attach(). - */ -void drm_panel_detach(struct drm_panel *panel) -{ - panel->connector = NULL; - panel->drm = NULL; -} -EXPORT_SYMBOL(drm_panel_detach); - -/** - * drm_panel_prepare - power on a panel - * @panel: DRM panel - * - * Calling this function will enable power and deassert any reset signals to - * the panel. After this has completed it is possible to communicate with any - * integrated circuitry via a command bus. - * - * Return: 0 on success or a negative error code on failure. - */ -int drm_panel_prepare(struct drm_panel *panel) -{ - if (panel && panel->funcs && panel->funcs->prepare) - return panel->funcs->prepare(panel); - - return panel ? -ENOSYS : -EINVAL; -} -EXPORT_SYMBOL(drm_panel_prepare); - -/** - * drm_panel_unprepare - power off a panel - * @panel: DRM panel - * - * Calling this function will completely power off a panel (assert the panel's - * reset, turn off power supplies, ...). After this function has completed, it - * is usually no longer possible to communicate with the panel until another - * call to drm_panel_prepare(). * * Return: 0 on success or a negative error code on failure. 
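Since the drm_panel_prepare()/enable()/disable()/unprepare() wrappers are dropped by this hunk, a display driver in the patched tree calls the &drm_panel_funcs hooks directly, in the usual power sequence described above. The two helpers below are an illustrative sketch with error handling omitted:

#include <drm/drm_panel.h>

static void my_pipe_panel_on(struct drm_panel *panel)
{
        /* Power up and deassert reset, then light up the display. */
        if (panel->funcs && panel->funcs->prepare)
                panel->funcs->prepare(panel);
        if (panel->funcs && panel->funcs->enable)
                panel->funcs->enable(panel);
}

static void my_pipe_panel_off(struct drm_panel *panel)
{
        /* Reverse order: display/backlight off first, then power down. */
        if (panel->funcs && panel->funcs->disable)
                panel->funcs->disable(panel);
        if (panel->funcs && panel->funcs->unprepare)
                panel->funcs->unprepare(panel);
}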
*/ -int drm_panel_unprepare(struct drm_panel *panel) +int drm_panel_detach(struct drm_panel *panel) { - if (panel && panel->funcs && panel->funcs->unprepare) - return panel->funcs->unprepare(panel); - - return panel ? -ENOSYS : -EINVAL; -} -EXPORT_SYMBOL(drm_panel_unprepare); - -/** - * drm_panel_enable - enable a panel - * @panel: DRM panel - * - * Calling this function will cause the panel display drivers to be turned on - * and the backlight to be enabled. Content will be visible on screen after - * this call completes. - * - * Return: 0 on success or a negative error code on failure. - */ -int drm_panel_enable(struct drm_panel *panel) -{ - if (panel && panel->funcs && panel->funcs->enable) - return panel->funcs->enable(panel); - - return panel ? -ENOSYS : -EINVAL; -} -EXPORT_SYMBOL(drm_panel_enable); - -/** - * drm_panel_disable - disable a panel - * @panel: DRM panel - * - * This will typically turn off the panel's backlight or disable the display - * drivers. For smart panels it should still be possible to communicate with - * the integrated circuitry via any command bus after this call. - * - * Return: 0 on success or a negative error code on failure. - */ -int drm_panel_disable(struct drm_panel *panel) -{ - if (panel && panel->funcs && panel->funcs->disable) - return panel->funcs->disable(panel); - - return panel ? -ENOSYS : -EINVAL; -} -EXPORT_SYMBOL(drm_panel_disable); - -/** - * drm_panel_get_modes - probe the available display modes of a panel - * @panel: DRM panel - * - * The modes probed from the panel are automatically added to the connector - * that the panel is attached to. - * - * Return: The number of modes available from the panel on success or a - * negative error code on failure. - */ -int drm_panel_get_modes(struct drm_panel *panel) -{ - if (panel && panel->funcs && panel->funcs->get_modes) - return panel->funcs->get_modes(panel); + panel->connector = NULL; + panel->drm = NULL; - return panel ? -ENOSYS : -EINVAL; + return 0; } -EXPORT_SYMBOL(drm_panel_get_modes); +EXPORT_SYMBOL(drm_panel_detach); #ifdef CONFIG_OF /** diff -rupN a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c --- a/drivers/gpu/drm/drm_prime.c 2019-09-23 21:45:36.283568778 +0200 +++ b/drivers/gpu/drm/drm_prime.c 2019-09-23 21:44:38.815735831 +0200 @@ -30,7 +30,6 @@ #include #include -#include #include #include #include @@ -39,52 +38,47 @@ #include "drm_internal.h" -/** - * DOC: overview and lifetime rules +/* + * DMA-BUF/GEM Object references and lifetime overview: + * + * On the export the dma_buf holds a reference to the exporting GEM + * object. It takes this reference in handle_to_fd_ioctl, when it + * first calls .prime_export and stores the exporting GEM object in + * the dma_buf priv. This reference needs to be released when the + * final reference to the &dma_buf itself is dropped and its + * &dma_buf_ops.release function is called. For GEM-based drivers, + * the dma_buf should be exported using drm_gem_dmabuf_export() and + * then released by drm_gem_dmabuf_release(). + * + * On the import the importing GEM object holds a reference to the + * dma_buf (which in turn holds a ref to the exporting GEM object). + * It takes that reference in the fd_to_handle ioctl. + * It calls dma_buf_get, creates an attachment to it and stores the + * attachment in the GEM object. When this attachment is destroyed + * when the imported object is destroyed, we remove the attachment + * and drop the reference to the dma_buf. + * + * When all the references to the &dma_buf are dropped, i.e. 
when + * userspace has closed both handles to the imported GEM object (through the + * FD_TO_HANDLE IOCTL) and closed the file descriptor of the exported + * (through the HANDLE_TO_FD IOCTL) dma_buf, and all kernel-internal references + * are also gone, then the dma_buf gets destroyed. This can also happen as a + * part of the clean up procedure in the drm_release() function if userspace + * fails to properly clean up. Note that both the kernel and userspace (by + * keeeping the PRIME file descriptors open) can hold references onto a + * &dma_buf. + * + * Thus the chain of references always flows in one direction + * (avoiding loops): importing_gem -> dmabuf -> exporting_gem + * + * Self-importing: if userspace is using PRIME as a replacement for flink + * then it will get a fd->handle request for a GEM object that it created. + * Drivers should detect this situation and return back the gem object + * from the dma-buf private. Prime will do this automatically for drivers that + * use the drm_gem_prime_{import,export} helpers. * - * Similar to GEM global names, PRIME file descriptors are also used to share - * buffer objects across processes. They offer additional security: as file - * descriptors must be explicitly sent over UNIX domain sockets to be shared - * between applications, they can't be guessed like the globally unique GEM - * names. - * - * Drivers that support the PRIME API implement the - * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations. - * GEM based drivers must use drm_gem_prime_handle_to_fd() and - * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the - * actual driver interfaces is provided through the &drm_gem_object_funcs.export - * and &drm_driver.gem_prime_import hooks. - * - * &dma_buf_ops implementations for GEM drivers are all individually exported - * for drivers which need to overwrite or reimplement some of them. - * - * Reference Counting for GEM Drivers - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * On the export the &dma_buf holds a reference to the exported buffer object, - * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD - * IOCTL, when it first calls &drm_gem_object_funcs.export - * and stores the exporting GEM object in the &dma_buf.priv field. This - * reference needs to be released when the final reference to the &dma_buf - * itself is dropped and its &dma_buf_ops.release function is called. For - * GEM-based drivers, the &dma_buf should be exported using - * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release(). - * - * Thus the chain of references always flows in one direction, avoiding loops: - * importing GEM object -> dma-buf -> exported GEM bo. A further complication - * are the lookup caches for import and export. These are required to guarantee - * that any given object will always have only one uniqe userspace handle. This - * is required to allow userspace to detect duplicated imports, since some GEM - * drivers do fail command submissions if a given buffer object is listed more - * than once. These import and export caches in &drm_prime_file_private only - * retain a weak reference, which is cleaned up when the corresponding object is - * released. - * - * Self-importing: If userspace is using PRIME as a replacement for flink then - * it will get a fd->handle request for a GEM object that it created. Drivers - * should detect this situation and return back the underlying object from the - * dma-buf private. 
For GEM based drivers this is handled in - * drm_gem_prime_import() already. + * GEM struct &dma_buf_ops symbols are now exported. They can be resued by + * drivers which implement GEM interface. */ struct drm_prime_member { @@ -187,6 +181,42 @@ static int drm_prime_lookup_buf_handle(s return -ENOENT; } +/** + * drm_gem_map_attach - dma_buf attach implementation for GEM + * @dma_buf: buffer to attach device to + * @attach: buffer attachment data + * + * Calls &drm_driver.gem_prime_pin for device specific handling. This can be + * used as the &dma_buf_ops.attach callback. + * + * Returns 0 on success, negative error code on failure. + */ +int drm_gem_map_attach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach) +{ + struct drm_gem_object *obj = dma_buf->priv; + + return drm_gem_pin(obj); +} +EXPORT_SYMBOL(drm_gem_map_attach); + +/** + * drm_gem_map_detach - dma_buf detach implementation for GEM + * @dma_buf: buffer to detach from + * @attach: attachment to be detached + * + * Cleans up &dma_buf_attachment. This can be used as the &dma_buf_ops.detach + * callback. + */ +void drm_gem_map_detach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach) +{ + struct drm_gem_object *obj = dma_buf->priv; + + drm_gem_unpin(obj); +} +EXPORT_SYMBOL(drm_gem_map_detach); + void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf) { @@ -212,21 +242,67 @@ void drm_prime_remove_buf_handle_locked( } } -void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) +/** + * drm_gem_map_dma_buf - map_dma_buf implementation for GEM + * @attach: attachment whose scatterlist is to be returned + * @dir: direction of DMA transfer + * + * Calls &drm_driver.gem_prime_get_sg_table and then maps the scatterlist. This + * can be used as the &dma_buf_ops.map_dma_buf callback. + * + * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR + * on error. May return -EINTR if it is interrupted by a signal. + */ + +struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, + enum dma_data_direction dir) { - mutex_init(&prime_fpriv->lock); - prime_fpriv->dmabufs = RB_ROOT; - prime_fpriv->handles = RB_ROOT; + struct drm_gem_object *obj = attach->dmabuf->priv; + struct sg_table *sgt; + + if (WARN_ON(dir == DMA_NONE)) + return ERR_PTR(-EINVAL); + + if (obj->funcs) + sgt = obj->funcs->get_sg_table(obj); + else + sgt = obj->dev->driver->gem_prime_get_sg_table(obj); + + if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, + DMA_ATTR_SKIP_CPU_SYNC)) { + sg_free_table(sgt); + kfree(sgt); + sgt = ERR_PTR(-ENOMEM); + } + + return sgt; } +EXPORT_SYMBOL(drm_gem_map_dma_buf); -void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) +/** + * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM + * @attach: attachment to unmap buffer from + * @sgt: scatterlist info of the buffer to unmap + * @dir: direction of DMA transfer + * + * This can be used as the &dma_buf_ops.unmap_dma_buf callback. 
+ */ +void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *sgt, + enum dma_data_direction dir) { - /* by now drm_gem_release should've made sure the list is empty */ - WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs)); + if (!sgt) + return; + + dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, + DMA_ATTR_SKIP_CPU_SYNC); + sg_free_table(sgt); + kfree(sgt); } +EXPORT_SYMBOL(drm_gem_unmap_dma_buf); /** - * drm_gem_dmabuf_export - &dma_buf export implementation for GEM + * drm_gem_dmabuf_export - dma_buf export implementation for GEM * @dev: parent device for the exported dmabuf * @exp_info: the export information used by dma_buf_export() * @@ -254,11 +330,11 @@ struct dma_buf *drm_gem_dmabuf_export(st EXPORT_SYMBOL(drm_gem_dmabuf_export); /** - * drm_gem_dmabuf_release - &dma_buf release implementation for GEM + * drm_gem_dmabuf_release - dma_buf release implementation for GEM * @dma_buf: buffer to be released * * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers - * must use this in their &dma_buf_ops structure as the release callback. + * must use this in their dma_buf ops structure as the release callback. * drm_gem_dmabuf_release() should be used in conjunction with * drm_gem_dmabuf_export(). */ @@ -275,100 +351,128 @@ void drm_gem_dmabuf_release(struct dma_b EXPORT_SYMBOL(drm_gem_dmabuf_release); /** - * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers - * @dev: dev to export the buffer from - * @file_priv: drm file-private structure - * @prime_fd: fd id of the dma-buf which should be imported - * @handle: pointer to storage for the handle of the imported buffer object + * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM + * @dma_buf: buffer to be mapped * - * This is the PRIME import function which must be used mandatorily by GEM - * drivers to ensure correct lifetime management of the underlying GEM object. - * The actual importing of GEM object from the dma-buf is done through the - * &drm_driver.gem_prime_import driver callback. + * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap + * callback. * - * Returns 0 on success or a negative error code on failure. + * Returns the kernel virtual address. */ -int drm_gem_prime_fd_to_handle(struct drm_device *dev, - struct drm_file *file_priv, int prime_fd, - uint32_t *handle) +void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) { - struct dma_buf *dma_buf; - struct drm_gem_object *obj; - int ret; - - dma_buf = dma_buf_get(prime_fd); - if (IS_ERR(dma_buf)) - return PTR_ERR(dma_buf); - - mutex_lock(&file_priv->prime.lock); + struct drm_gem_object *obj = dma_buf->priv; + void *vaddr; - ret = drm_prime_lookup_buf_handle(&file_priv->prime, - dma_buf, handle); - if (ret == 0) - goto out_put; + vaddr = drm_gem_vmap(obj); + if (IS_ERR(vaddr)) + vaddr = NULL; - /* never seen this one, need to import */ - mutex_lock(&dev->object_name_lock); - if (dev->driver->gem_prime_import) - obj = dev->driver->gem_prime_import(dev, dma_buf); - else - obj = drm_gem_prime_import(dev, dma_buf); - if (IS_ERR(obj)) { - ret = PTR_ERR(obj); - goto out_unlock; - } + return vaddr; +} +EXPORT_SYMBOL(drm_gem_dmabuf_vmap); - if (obj->dma_buf) { - WARN_ON(obj->dma_buf != dma_buf); - } else { - obj->dma_buf = dma_buf; - get_dma_buf(dma_buf); - } +/** + * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM + * @dma_buf: buffer to be unmapped + * @vaddr: the virtual address of the buffer + * + * Releases a kernel virtual mapping. 
This can be used as the + * &dma_buf_ops.vunmap callback. + */ +void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) +{ + struct drm_gem_object *obj = dma_buf->priv; - /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */ - ret = drm_gem_handle_create_tail(file_priv, obj, handle); - drm_gem_object_put_unlocked(obj); - if (ret) - goto out_put; + drm_gem_vunmap(obj, vaddr); +} +EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); - ret = drm_prime_add_buf_handle(&file_priv->prime, - dma_buf, *handle); - mutex_unlock(&file_priv->prime.lock); - if (ret) - goto fail; +/** + * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM + * @dma_buf: buffer to be mapped + * @vma: virtual address range + * + * Provides memory mapping for the buffer. This can be used as the + * &dma_buf_ops.mmap callback. + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = dma_buf->priv; + struct drm_device *dev = obj->dev; - dma_buf_put(dma_buf); + if (!dev->driver->gem_prime_mmap) + return -ENOSYS; - return 0; + return dev->driver->gem_prime_mmap(obj, vma); +} +EXPORT_SYMBOL(drm_gem_dmabuf_mmap); -fail: - /* hmm, if driver attached, we are relying on the free-object path - * to detach.. which seems ok.. - */ - drm_gem_handle_delete(file_priv, *handle); - dma_buf_put(dma_buf); - return ret; +static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { + .cache_sgt_mapping = true, + .attach = drm_gem_map_attach, + .detach = drm_gem_map_detach, + .map_dma_buf = drm_gem_map_dma_buf, + .unmap_dma_buf = drm_gem_unmap_dma_buf, + .release = drm_gem_dmabuf_release, + .mmap = drm_gem_dmabuf_mmap, + .vmap = drm_gem_dmabuf_vmap, + .vunmap = drm_gem_dmabuf_vunmap, +}; -out_unlock: - mutex_unlock(&dev->object_name_lock); -out_put: - mutex_unlock(&file_priv->prime.lock); - dma_buf_put(dma_buf); - return ret; -} -EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); +/** + * DOC: PRIME Helpers + * + * Drivers can implement @gem_prime_export and @gem_prime_import in terms of + * simpler APIs by using the helper functions @drm_gem_prime_export and + * @drm_gem_prime_import. These functions implement dma-buf support in terms of + * six lower-level driver callbacks: + * + * Export callbacks: + * + * * @gem_prime_pin (optional): prepare a GEM object for exporting + * * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages + * * @gem_prime_vmap: vmap a buffer exported by your driver + * * @gem_prime_vunmap: vunmap a buffer exported by your driver + * * @gem_prime_mmap (optional): mmap a buffer exported by your driver + * + * Import callback: + * + * * @gem_prime_import_sg_table (import): produce a GEM object from another + * driver's scatter/gather table + */ -int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +/** + * drm_gem_prime_export - helper library implementation of the export callback + * @dev: drm_device to export from + * @obj: GEM object to export + * @flags: flags like DRM_CLOEXEC and DRM_RDWR + * + * This is the implementation of the gem_prime_export functions for GEM drivers + * using the PRIME helpers. 
+ */ +struct dma_buf *drm_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *obj, + int flags) { - struct drm_prime_handle *args = data; + struct dma_buf_export_info exp_info = { + .exp_name = KBUILD_MODNAME, /* white lie for debug */ + .owner = dev->driver->fops->owner, + .ops = &drm_gem_prime_dmabuf_ops, + .size = obj->size, + .flags = flags, + .priv = obj, + .resv = obj->resv, + }; - if (!dev->driver->prime_fd_to_handle) - return -ENOSYS; + if (dev->driver->gem_prime_res_obj) + exp_info.resv = dev->driver->gem_prime_res_obj(obj); - return dev->driver->prime_fd_to_handle(dev, file_priv, - args->fd, &args->handle); + return drm_gem_dmabuf_export(dev, &exp_info); } +EXPORT_SYMBOL(drm_gem_prime_export); static struct dma_buf *export_and_register_object(struct drm_device *dev, struct drm_gem_object *obj, @@ -385,9 +489,9 @@ static struct dma_buf *export_and_regist if (obj->funcs && obj->funcs->export) dmabuf = obj->funcs->export(obj, flags); else if (dev->driver->gem_prime_export) - dmabuf = dev->driver->gem_prime_export(obj, flags); + dmabuf = dev->driver->gem_prime_export(dev, obj, flags); else - dmabuf = drm_gem_prime_export(obj, flags); + dmabuf = drm_gem_prime_export(dev, obj, flags); if (IS_ERR(dmabuf)) { /* normally the created dma-buf takes ownership of the ref, * but if that fails then drop the ref @@ -417,7 +521,7 @@ static struct dma_buf *export_and_regist * This is the PRIME export function which must be used mandatorily by GEM * drivers to ensure correct lifetime management of the underlying GEM object. * The actual exporting from GEM object to a dma-buf is done through the - * &drm_driver.gem_prime_export driver callback. + * gem_prime_export driver callback. */ int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, @@ -506,195 +610,6 @@ out_unlock: } EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); -int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_prime_handle *args = data; - - if (!dev->driver->prime_handle_to_fd) - return -ENOSYS; - - /* check flags are valid */ - if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR)) - return -EINVAL; - - return dev->driver->prime_handle_to_fd(dev, file_priv, - args->handle, args->flags, &args->fd); -} - -/** - * DOC: PRIME Helpers - * - * Drivers can implement &drm_gem_object_funcs.export and - * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper - * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions - * implement dma-buf support in terms of some lower-level helpers, which are - * again exported for drivers to use individually: - * - * Exporting buffers - * ~~~~~~~~~~~~~~~~~ - * - * Optional pinning of buffers is handled at dma-buf attach and detach time in - * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is - * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which relies on - * &drm_gem_object_funcs.get_sg_table. - * - * For kernel-internal access there's drm_gem_dmabuf_vmap() and - * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by - * drm_gem_dmabuf_mmap(). - * - * Note that these export helpers can only be used if the underlying backing - * storage is fully coherent and either permanently pinned, or it is safe to pin - * it indefinitely. - * - * FIXME: The underlying helper functions are named rather inconsistently. 
- * - * Exporting buffers - * ~~~~~~~~~~~~~~~~~ - * - * Importing dma-bufs using drm_gem_prime_import() relies on - * &drm_driver.gem_prime_import_sg_table. - * - * Note that similarly to the export helpers this permanently pins the - * underlying backing storage. Which is ok for scanout, but is not the best - * option for sharing lots of buffers for rendering. - */ - -/** - * drm_gem_map_attach - dma_buf attach implementation for GEM - * @dma_buf: buffer to attach device to - * @attach: buffer attachment data - * - * Calls &drm_gem_object_funcs.pin for device specific handling. This can be - * used as the &dma_buf_ops.attach callback. Must be used together with - * drm_gem_map_detach(). - * - * Returns 0 on success, negative error code on failure. - */ -int drm_gem_map_attach(struct dma_buf *dma_buf, - struct dma_buf_attachment *attach) -{ - struct drm_gem_object *obj = dma_buf->priv; - - return drm_gem_pin(obj); -} -EXPORT_SYMBOL(drm_gem_map_attach); - -/** - * drm_gem_map_detach - dma_buf detach implementation for GEM - * @dma_buf: buffer to detach from - * @attach: attachment to be detached - * - * Calls &drm_gem_object_funcs.pin for device specific handling. Cleans up - * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the - * &dma_buf_ops.detach callback. - */ -void drm_gem_map_detach(struct dma_buf *dma_buf, - struct dma_buf_attachment *attach) -{ - struct drm_gem_object *obj = dma_buf->priv; - - drm_gem_unpin(obj); -} -EXPORT_SYMBOL(drm_gem_map_detach); - -/** - * drm_gem_map_dma_buf - map_dma_buf implementation for GEM - * @attach: attachment whose scatterlist is to be returned - * @dir: direction of DMA transfer - * - * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This - * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together - * with drm_gem_unmap_dma_buf(). - * - * Returns:sg_table containing the scatterlist to be returned; returns ERR_PTR - * on error. May return -EINTR if it is interrupted by a signal. - */ -struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, - enum dma_data_direction dir) -{ - struct drm_gem_object *obj = attach->dmabuf->priv; - struct sg_table *sgt; - - if (WARN_ON(dir == DMA_NONE)) - return ERR_PTR(-EINVAL); - - if (obj->funcs) - sgt = obj->funcs->get_sg_table(obj); - else - sgt = obj->dev->driver->gem_prime_get_sg_table(obj); - - if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC)) { - sg_free_table(sgt); - kfree(sgt); - sgt = ERR_PTR(-ENOMEM); - } - - return sgt; -} -EXPORT_SYMBOL(drm_gem_map_dma_buf); - -/** - * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM - * @attach: attachment to unmap buffer from - * @sgt: scatterlist info of the buffer to unmap - * @dir: direction of DMA transfer - * - * This can be used as the &dma_buf_ops.unmap_dma_buf callback. - */ -void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, - struct sg_table *sgt, - enum dma_data_direction dir) -{ - if (!sgt) - return; - - dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC); - sg_free_table(sgt); - kfree(sgt); -} -EXPORT_SYMBOL(drm_gem_unmap_dma_buf); - -/** - * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM - * @dma_buf: buffer to be mapped - * - * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap - * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling. - * - * Returns the kernel virtual address or NULL on failure. 
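The attach/map callbacks being restored above are the exporter half of the dma-buf contract. Purely as a sketch of the other half, this is roughly what an importing driver does with a dma_buf it received; only core dma-buf calls are used, and importer_dev is an assumed struct device belonging to the importer.

/* Minimal importer-side sketch; "importer_dev" and "dmabuf" come from the
 * surrounding (hypothetical) driver, and error handling is abbreviated. */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

static int foo_import_dmabuf(struct device *importer_dev, struct dma_buf *dmabuf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        attach = dma_buf_attach(dmabuf, importer_dev);  /* -> drm_gem_map_attach() */
        if (IS_ERR(attach))
                return PTR_ERR(attach);

        /* Pins the backing storage and returns it as a mapped scatterlist;
         * with the exporter above this ends up in drm_gem_map_dma_buf(). */
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                dma_buf_detach(dmabuf, attach);
                return PTR_ERR(sgt);
        }

        /* ... program the importing device with sgt ... */

        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
        dma_buf_detach(dmabuf, attach);
        return 0;
}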
- */ -void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) -{ - struct drm_gem_object *obj = dma_buf->priv; - void *vaddr; - - vaddr = drm_gem_vmap(obj); - if (IS_ERR(vaddr)) - vaddr = NULL; - - return vaddr; -} -EXPORT_SYMBOL(drm_gem_dmabuf_vmap); - -/** - * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM - * @dma_buf: buffer to be unmapped - * @vaddr: the virtual address of the buffer - * - * Releases a kernel virtual mapping. This can be used as the - * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling. - */ -void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) -{ - struct drm_gem_object *obj = dma_buf->priv; - - drm_gem_vunmap(obj, vaddr); -} -EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); - /** * drm_gem_prime_mmap - PRIME mmap function for GEM drivers * @obj: GEM object @@ -742,117 +657,14 @@ out: EXPORT_SYMBOL(drm_gem_prime_mmap); /** - * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM - * @dma_buf: buffer to be mapped - * @vma: virtual address range - * - * Provides memory mapping for the buffer. This can be used as the - * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap, - * which should be set to drm_gem_prime_mmap(). - * - * FIXME: There's really no point to this wrapper, drivers which need anything - * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback. - * - * Returns 0 on success or a negative error code on failure. - */ -int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) -{ - struct drm_gem_object *obj = dma_buf->priv; - struct drm_device *dev = obj->dev; - - if (!dev->driver->gem_prime_mmap) - return -ENOSYS; - - return dev->driver->gem_prime_mmap(obj, vma); -} -EXPORT_SYMBOL(drm_gem_dmabuf_mmap); - -static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { - .cache_sgt_mapping = true, - .attach = drm_gem_map_attach, - .detach = drm_gem_map_detach, - .map_dma_buf = drm_gem_map_dma_buf, - .unmap_dma_buf = drm_gem_unmap_dma_buf, - .release = drm_gem_dmabuf_release, - .mmap = drm_gem_dmabuf_mmap, - .vmap = drm_gem_dmabuf_vmap, - .vunmap = drm_gem_dmabuf_vunmap, -}; - -/** - * drm_prime_pages_to_sg - converts a page array into an sg list - * @pages: pointer to the array of page pointers to convert - * @nr_pages: length of the page vector - * - * This helper creates an sg table object from a set of pages - * the driver is responsible for mapping the pages into the - * importers address space for use with dma_buf itself. - * - * This is useful for implementing &drm_gem_object_funcs.get_sg_table. - */ -struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages) -{ - struct sg_table *sg = NULL; - int ret; - - sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); - if (!sg) { - ret = -ENOMEM; - goto out; - } - - ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0, - nr_pages << PAGE_SHIFT, GFP_KERNEL); - if (ret) - goto out; - - return sg; -out: - kfree(sg); - return ERR_PTR(ret); -} -EXPORT_SYMBOL(drm_prime_pages_to_sg); - -/** - * drm_gem_prime_export - helper library implementation of the export callback - * @obj: GEM object to export - * @flags: flags like DRM_CLOEXEC and DRM_RDWR - * - * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers - * using the PRIME helpers. It is used as the default in - * drm_gem_prime_handle_to_fd(). 
- */ -struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, - int flags) -{ - struct drm_device *dev = obj->dev; - struct dma_buf_export_info exp_info = { - .exp_name = KBUILD_MODNAME, /* white lie for debug */ - .owner = dev->driver->fops->owner, - .ops = &drm_gem_prime_dmabuf_ops, - .size = obj->size, - .flags = flags, - .priv = obj, - .resv = obj->resv, - }; - - return drm_gem_dmabuf_export(dev, &exp_info); -} -EXPORT_SYMBOL(drm_gem_prime_export); - -/** * drm_gem_prime_import_dev - core implementation of the import callback * @dev: drm_device to import into * @dma_buf: dma-buf object to import * @attach_dev: struct device to dma_buf attach * - * This is the core of drm_gem_prime_import(). It's designed to be called by - * drivers who want to use a different device structure than &drm_device.dev for - * attaching via dma_buf. This function calls - * &drm_driver.gem_prime_import_sg_table internally. - * - * Drivers must arrange to call drm_prime_gem_destroy() from their - * &drm_gem_object_funcs.free hook when using this function. + * This is the core of drm_gem_prime_import. It's designed to be called by + * drivers who want to use a different device structure than dev->dev for + * attaching via dma_buf. */ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, struct dma_buf *dma_buf, @@ -897,7 +709,6 @@ struct drm_gem_object *drm_gem_prime_imp } obj->import_attach = attach; - obj->resv = dma_buf->resv; return obj; @@ -917,12 +728,7 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev); * @dma_buf: dma-buf object to import * * This is the implementation of the gem_prime_import functions for GEM drivers - * using the PRIME helpers. Drivers can use this as their - * &drm_driver.gem_prime_import implementation. It is used as the default - * implementation in drm_gem_prime_fd_to_handle(). - * - * Drivers must arrange to call drm_prime_gem_destroy() from their - * &drm_gem_object_funcs.free hook when using this function. + * using the PRIME helpers. */ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) @@ -932,6 +738,154 @@ struct drm_gem_object *drm_gem_prime_imp EXPORT_SYMBOL(drm_gem_prime_import); /** + * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers + * @dev: dev to export the buffer from + * @file_priv: drm file-private structure + * @prime_fd: fd id of the dma-buf which should be imported + * @handle: pointer to storage for the handle of the imported buffer object + * + * This is the PRIME import function which must be used mandatorily by GEM + * drivers to ensure correct lifetime management of the underlying GEM object. + * The actual importing of GEM object from the dma-buf is done through the + * gem_prime_import driver callback.
+ */ +int drm_gem_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, int prime_fd, + uint32_t *handle) +{ + struct dma_buf *dma_buf; + struct drm_gem_object *obj; + int ret; + + dma_buf = dma_buf_get(prime_fd); + if (IS_ERR(dma_buf)) + return PTR_ERR(dma_buf); + + mutex_lock(&file_priv->prime.lock); + + ret = drm_prime_lookup_buf_handle(&file_priv->prime, + dma_buf, handle); + if (ret == 0) + goto out_put; + + /* never seen this one, need to import */ + mutex_lock(&dev->object_name_lock); + if (dev->driver->gem_prime_import) + obj = dev->driver->gem_prime_import(dev, dma_buf); + else + obj = drm_gem_prime_import(dev, dma_buf); + if (IS_ERR(obj)) { + ret = PTR_ERR(obj); + goto out_unlock; + } + + if (obj->dma_buf) { + WARN_ON(obj->dma_buf != dma_buf); + } else { + obj->dma_buf = dma_buf; + get_dma_buf(dma_buf); + } + + /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */ + ret = drm_gem_handle_create_tail(file_priv, obj, handle); + drm_gem_object_put_unlocked(obj); + if (ret) + goto out_put; + + ret = drm_prime_add_buf_handle(&file_priv->prime, + dma_buf, *handle); + mutex_unlock(&file_priv->prime.lock); + if (ret) + goto fail; + + dma_buf_put(dma_buf); + + return 0; + +fail: + /* hmm, if driver attached, we are relying on the free-object path + * to detach.. which seems ok.. + */ + drm_gem_handle_delete(file_priv, *handle); + dma_buf_put(dma_buf); + return ret; + +out_unlock: + mutex_unlock(&dev->object_name_lock); +out_put: + mutex_unlock(&file_priv->prime.lock); + dma_buf_put(dma_buf); + return ret; +} +EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); + +int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_prime_handle *args = data; + + if (!drm_core_check_feature(dev, DRIVER_PRIME)) + return -EOPNOTSUPP; + + if (!dev->driver->prime_handle_to_fd) + return -ENOSYS; + + /* check flags are valid */ + if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR)) + return -EINVAL; + + return dev->driver->prime_handle_to_fd(dev, file_priv, + args->handle, args->flags, &args->fd); +} + +int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_prime_handle *args = data; + + if (!drm_core_check_feature(dev, DRIVER_PRIME)) + return -EOPNOTSUPP; + + if (!dev->driver->prime_fd_to_handle) + return -ENOSYS; + + return dev->driver->prime_fd_to_handle(dev, file_priv, + args->fd, &args->handle); +} + +/** + * drm_prime_pages_to_sg - converts a page array into an sg list + * @pages: pointer to the array of page pointers to convert + * @nr_pages: length of the page vector + * + * This helper creates an sg table object from a set of pages + * the driver is responsible for mapping the pages into the + * importers address space for use with dma_buf itself. 
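A minimal userspace view of the fd/handle round trip backed by drm_gem_prime_handle_to_fd() and drm_gem_prime_fd_to_handle() above, assuming libdrm's drmPrimeHandleToFD()/drmPrimeFDToHandle() wrappers are available; fd_a, fd_b and handle_a stand for two already-open DRM device fds and an existing GEM handle.

/* Userspace sketch of sharing a buffer between two DRM devices via PRIME. */
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

static int share_buffer(int fd_a, uint32_t handle_a, int fd_b, uint32_t *handle_b)
{
        int prime_fd;

        /* DRM_IOCTL_PRIME_HANDLE_TO_FD -> drm_gem_prime_handle_to_fd() */
        if (drmPrimeHandleToFD(fd_a, handle_a, DRM_CLOEXEC | DRM_RDWR, &prime_fd))
                return -1;

        /* DRM_IOCTL_PRIME_FD_TO_HANDLE -> drm_gem_prime_fd_to_handle() */
        if (drmPrimeFDToHandle(fd_b, prime_fd, handle_b)) {
                close(prime_fd);
                return -1;
        }

        close(prime_fd);
        return 0;
}

The DRM_CLOEXEC | DRM_RDWR value matches the flag mask checked in drm_prime_handle_to_fd_ioctl() above.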
+ */ +struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages) +{ + struct sg_table *sg = NULL; + int ret; + + sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!sg) { + ret = -ENOMEM; + goto out; + } + + ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0, + nr_pages << PAGE_SHIFT, GFP_KERNEL); + if (ret) + goto out; + + return sg; +out: + kfree(sg); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(drm_prime_pages_to_sg); + +/** * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array * @sgt: scatter-gather table to convert * @pages: optional array of page pointers to store the page array in @@ -940,9 +894,6 @@ EXPORT_SYMBOL(drm_gem_prime_import); * * Exports an sg table into an array of pages and addresses. This is currently * required by the TTM driver in order to do correct fault handling. - * - * Drivers can use this in their &drm_driver.gem_prime_import_sg_table - * implementation. */ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, dma_addr_t *addrs, int max_entries) @@ -983,7 +934,7 @@ EXPORT_SYMBOL(drm_prime_sg_to_page_addr_ * @sg: the sg-table which was pinned at import time * * This is the cleanup functions which GEM drivers need to call when they use - * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs. + * @drm_gem_prime_import to import dma-bufs. */ void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg) { @@ -998,3 +949,16 @@ void drm_prime_gem_destroy(struct drm_ge dma_buf_put(dma_buf); } EXPORT_SYMBOL(drm_prime_gem_destroy); + +void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) +{ + mutex_init(&prime_fpriv->lock); + prime_fpriv->dmabufs = RB_ROOT; + prime_fpriv->handles = RB_ROOT; +} + +void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) +{ + /* by now drm_gem_release should've made sure the list is empty */ + WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs)); +} diff -rupN a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c --- a/drivers/gpu/drm/drm_scatter.c 2019-09-23 21:45:36.283568778 +0200 +++ b/drivers/gpu/drm/drm_scatter.c 2019-09-23 21:44:38.815735831 +0200 @@ -1,4 +1,4 @@ -/* +/** * \file drm_scatter.c * IOCTLs to manage scatter/gather memory * diff -rupN a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c --- a/drivers/gpu/drm/drm_syncobj.c 2019-09-23 21:45:36.283568778 +0200 +++ b/drivers/gpu/drm/drm_syncobj.c 2019-09-23 21:44:38.815735831 +0200 @@ -29,97 +29,21 @@ /** * DOC: Overview * - * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a - * container for a synchronization primitive which can be used by userspace - * to explicitly synchronize GPU commands, can be shared between userspace - * processes, and can be shared between different DRM drivers. + * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are + * persistent objects that contain an optional fence. The fence can be updated + * with a new fence, or be NULL. + * + * syncobj's can be waited upon, where it will wait for the underlying + * fence. + * + * syncobj's can be export to fd's and back, these fd's are opaque and + * have no other use case, except passing the syncobj between processes. + * * Their primary use-case is to implement Vulkan fences and semaphores. 
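As a sketch of how a driver's gem_prime_get_sg_table hook might sit on top of drm_prime_pages_to_sg() above: struct foo_bo and its pages array are assumptions for illustration, and an importer could later recover the pages again with drm_prime_sg_to_page_addr_arrays().

#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/* Assumed driver object: a GEM object backed by an array of pages. */
struct foo_bo {
        struct drm_gem_object base;
        struct page **pages;            /* filled when the object is created */
};

struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct foo_bo *bo = container_of(obj, struct foo_bo, base);

        /* One sg entry per backing page; the importer maps the table into
         * its own DMA domain (see drm_gem_map_dma_buf() earlier). */
        return drm_prime_pages_to_sg(bo->pages, obj->size >> PAGE_SHIFT);
}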
- * The syncobj userspace API provides ioctls for several operations: * - * - Creation and destruction of syncobjs - * - Import and export of syncobjs to/from a syncobj file descriptor - * - Import and export a syncobj's underlying fence to/from a sync file - * - Reset a syncobj (set its fence to NULL) - * - Signal a syncobj (set a trivially signaled fence) - * - Wait for a syncobj's fence to appear and be signaled - * - * At it's core, a syncobj is simply a wrapper around a pointer to a struct - * &dma_fence which may be NULL. - * When a syncobj is first created, its pointer is either NULL or a pointer - * to an already signaled fence depending on whether the - * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to - * &DRM_IOCTL_SYNCOBJ_CREATE. - * When GPU work which signals a syncobj is enqueued in a DRM driver, - * the syncobj fence is replaced with a fence which will be signaled by the - * completion of that work. - * When GPU work which waits on a syncobj is enqueued in a DRM driver, the - * driver retrieves syncobj's current fence at the time the work is enqueued - * waits on that fence before submitting the work to hardware. - * If the syncobj's fence is NULL, the enqueue operation is expected to fail. - * All manipulation of the syncobjs's fence happens in terms of the current - * fence at the time the ioctl is called by userspace regardless of whether - * that operation is an immediate host-side operation (signal or reset) or - * or an operation which is enqueued in some driver queue. - * &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used to - * manipulate a syncobj from the host by resetting its pointer to NULL or - * setting its pointer to a fence which is already signaled. - * - * - * Host-side wait on syncobjs - * -------------------------- - * - * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a - * host-side wait on all of the syncobj fences simultaneously. - * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on - * all of the syncobj fences to be signaled before it returns. - * Otherwise, it returns once at least one syncobj fence has been signaled - * and the index of a signaled fence is written back to the client. - * - * Unlike the enqueued GPU work dependencies which fail if they see a NULL - * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set, - * the host-side wait will first wait for the syncobj to receive a non-NULL - * fence and then wait on that fence. - * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the - * syncobjs in the array has a NULL fence, -EINVAL will be returned. - * Assuming the syncobj starts off with a NULL fence, this allows a client - * to do a host wait in one thread (or process) which waits on GPU work - * submitted in another thread (or process) without having to manually - * synchronize between the two. - * This requirement is inherited from the Vulkan fence API. - * - * - * Import/export of syncobjs - * ------------------------- - * - * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD - * provide two mechanisms for import/export of syncobjs. - * - * The first lets the client import or export an entire syncobj to a file - * descriptor. - * These fd's are opaque and have no other use case, except passing the - * syncobj between processes. 
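For the ioctls and wait flags named in the text above, a rough userspace sequence using libdrm's syncobj wrappers (assumed available) might look like the following; fd is an open DRM device fd and error checking is omitted.

/* Userspace sketch of create, opaque-fd export and host-side wait. */
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

static void syncobj_demo(int fd)
{
        uint32_t syncobj, first;
        int syncobj_fd;

        drmSyncobjCreate(fd, 0, &syncobj);

        /* Opaque fd export (DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD); the fd's only
         * use is passing the syncobj to another process. */
        drmSyncobjHandleToFD(fd, syncobj, &syncobj_fd);

        /* Host-side wait: block until a fence appears and signals, i.e. the
         * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT behaviour described above. */
        drmSyncobjWait(fd, &syncobj, 1, INT64_MAX,
                       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
                       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
                       &first);

        close(syncobj_fd);
        drmSyncobjDestroy(fd, syncobj);
}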
- * All exported file descriptors and any syncobj handles created as a - * result of importing those file descriptors own a reference to the - * same underlying struct &drm_syncobj and the syncobj can be used - * persistently across all the processes with which it is shared. - * The syncobj is freed only once the last reference is dropped. - * Unlike dma-buf, importing a syncobj creates a new handle (with its own - * reference) for every import instead of de-duplicating. - * The primary use-case of this persistent import/export is for shared - * Vulkan fences and semaphores. - * - * The second import/export mechanism, which is indicated by - * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or - * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE lets the client - * import/export the syncobj's current fence from/to a &sync_file. - * When a syncobj is exported to a sync file, that sync file wraps the - * sycnobj's fence at the time of export and any later signal or reset - * operations on the syncobj will not affect the exported sync file. - * When a sync file is imported into a syncobj, the syncobj's fence is set - * to the fence wrapped by that sync file. - * Because sync files are immutable, resetting or signaling the syncobj - * will not affect any sync files whose fences have been imported into the - * syncobj. + * syncobj have a kref reference count, but also have an optional file. + * The file is only created once the syncobj is exported. + * The file takes a reference on the kref. */ #include @@ -129,7 +53,6 @@ #include #include -#include #include #include #include @@ -1374,14 +1297,14 @@ int drm_syncobj_query_ioctl(struct drm_d struct dma_fence *iter, *last_signaled = NULL; dma_fence_chain_for_each(iter, fence) { - if (iter->context != fence->context) { - dma_fence_put(iter); - /* It is most likely that timeline has - * unorder points. */ + if (!iter) break; - } dma_fence_put(last_signaled); last_signaled = dma_fence_get(iter); + if (!to_dma_fence_chain(last_signaled)->prev_seqno) + /* It is most likely that timeline has + * unorder points. */ + break; } point = dma_fence_is_signaled(last_signaled) ? last_signaled->seqno : diff -rupN a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c --- a/drivers/gpu/drm/drm_sysfs.c 2019-09-23 21:45:36.283568778 +0200 +++ b/drivers/gpu/drm/drm_sysfs.c 2019-09-23 21:44:38.815735831 +0200 @@ -14,7 +14,6 @@ #include #include #include -#include #include #include @@ -27,7 +26,6 @@ #include #include "drm_internal.h" -#include "drm_crtc_internal.h" #define to_drm_minor(d) dev_get_drvdata(d) #define to_drm_connector(d) dev_get_drvdata(d) @@ -296,9 +294,6 @@ int drm_sysfs_connector_add(struct drm_c /* Let userspace know we have a new connector */ drm_sysfs_hotplug_event(dev); - if (connector->ddc) - return sysfs_create_link(&connector->kdev->kobj, - &connector->ddc->dev.kobj, "ddc"); return 0; } @@ -306,10 +301,6 @@ void drm_sysfs_connector_remove(struct d { if (!connector->kdev) return; - - if (connector->ddc) - sysfs_remove_link(&connector->kdev->kobj, "ddc"); - DRM_DEBUG("removing \"%s\" from sysfs\n", connector->name); @@ -334,9 +325,6 @@ void drm_sysfs_lease_event(struct drm_de * Send a uevent for the DRM device specified by @dev. Currently we only * set HOTPLUG=1 in the uevent environment, but this could be expanded to * deal with other types of events. - * - * Any new uapi should be using the drm_sysfs_connector_status_event() - * for uevents on connector status change. 
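On the receiving end of drm_sysfs_hotplug_event(), a compositor typically watches for the HOTPLUG=1 property through udev. A loose sketch with libudev (an assumption of this example, not something this patch touches):

#include <libudev.h>
#include <poll.h>
#include <stdio.h>

static void wait_for_drm_hotplug(void)
{
        struct udev *udev = udev_new();
        struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");
        struct udev_device *dev;
        struct pollfd pfd;

        udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
        udev_monitor_enable_receiving(mon);

        pfd.fd = udev_monitor_get_fd(mon);
        pfd.events = POLLIN;
        poll(&pfd, 1, -1);

        dev = udev_monitor_receive_device(mon);
        if (dev && udev_device_get_property_value(dev, "HOTPLUG"))
                printf("DRM hotplug event, reprobe connectors\n");

        udev_device_unref(dev);
        udev_monitor_unref(mon);
        udev_unref(udev);
}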
*/ void drm_sysfs_hotplug_event(struct drm_device *dev) { @@ -349,37 +337,6 @@ void drm_sysfs_hotplug_event(struct drm_ } EXPORT_SYMBOL(drm_sysfs_hotplug_event); -/** - * drm_sysfs_connector_status_event - generate a DRM uevent for connector - * property status change - * @connector: connector on which property status changed - * @property: connector property whose status changed. - * - * Send a uevent for the DRM device specified by @dev. Currently we - * set HOTPLUG=1 and connector id along with the attached property id - * related to the status change. - */ -void drm_sysfs_connector_status_event(struct drm_connector *connector, - struct drm_property *property) -{ - struct drm_device *dev = connector->dev; - char hotplug_str[] = "HOTPLUG=1", conn_id[21], prop_id[21]; - char *envp[4] = { hotplug_str, conn_id, prop_id, NULL }; - - WARN_ON(!drm_mode_obj_find_prop_id(&connector->base, - property->base.id)); - - snprintf(conn_id, ARRAY_SIZE(conn_id), - "CONNECTOR=%u", connector->base.id); - snprintf(prop_id, ARRAY_SIZE(prop_id), - "PROPERTY=%u", property->base.id); - - DRM_DEBUG("generating connector status event\n"); - - kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp); -} -EXPORT_SYMBOL(drm_sysfs_connector_status_event); - static void drm_sysfs_release(struct device *dev) { kfree(dev); diff -rupN a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c --- a/drivers/gpu/drm/drm_vblank.c 2019-09-23 21:45:36.283568778 +0200 +++ b/drivers/gpu/drm/drm_vblank.c 2019-09-23 21:44:38.815735831 +0200 @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "drm_internal.h" @@ -1669,28 +1670,12 @@ int drm_wait_vblank_ioctl(struct drm_dev } if (req_seq != seq) { - int wait; - DRM_DEBUG("waiting on vblank count %llu, crtc %u\n", req_seq, pipe); - wait = wait_event_interruptible_timeout(vblank->queue, - vblank_passed(drm_vblank_count(dev, pipe), req_seq) || - !READ_ONCE(vblank->enabled), - msecs_to_jiffies(3000)); - - switch (wait) { - case 0: - /* timeout */ - ret = -EBUSY; - break; - case -ERESTARTSYS: - /* interrupted by signal */ - ret = -EINTR; - break; - default: - ret = 0; - break; - } + DRM_WAIT_ON(ret, vblank->queue, 3 * HZ, + vblank_passed(drm_vblank_count(dev, pipe), + req_seq) || + !READ_ONCE(vblank->enabled)); } if (ret != -EINTR) { diff -rupN a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c --- a/drivers/gpu/drm/drm_vm.c 2019-09-23 21:45:36.283568778 +0200 +++ b/drivers/gpu/drm/drm_vm.c 2019-09-23 21:44:38.819735820 +0200 @@ -1,4 +1,4 @@ -/* +/** * \file drm_vm.c * Memory mapping for DRM * diff -rupN a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c --- a/drivers/gpu/drm/radeon/cik.c 2019-09-23 21:45:36.491568149 +0200 +++ b/drivers/gpu/drm/radeon/cik.c 2019-09-23 21:44:38.819735820 +0200 @@ -3659,7 +3659,7 @@ bool cik_semaphore_ring_emit(struct rade struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv) + struct reservation_object *resv) { struct radeon_fence *fence; struct radeon_sync sync; diff -rupN a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c --- a/drivers/gpu/drm/radeon/cik_sdma.c 2019-09-23 21:45:36.495568136 +0200 +++ b/drivers/gpu/drm/radeon/cik_sdma.c 2019-09-23 21:44:38.819735820 +0200 @@ -579,7 +579,7 @@ void cik_sdma_fini(struct radeon_device struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv) + 
struct reservation_object *resv) { struct radeon_fence *fence; struct radeon_sync sync; diff -rupN a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c --- a/drivers/gpu/drm/radeon/evergreen_dma.c 2019-09-23 21:45:36.495568136 +0200 +++ b/drivers/gpu/drm/radeon/evergreen_dma.c 2019-09-23 21:44:38.819735820 +0200 @@ -108,7 +108,7 @@ struct radeon_fence *evergreen_copy_dma( uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv) + struct reservation_object *resv) { struct radeon_fence *fence; struct radeon_sync sync; diff -rupN a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c --- a/drivers/gpu/drm/radeon/r100.c 2019-09-23 21:45:36.499568124 +0200 +++ b/drivers/gpu/drm/radeon/r100.c 2019-09-23 21:44:38.819735820 +0200 @@ -891,7 +891,7 @@ struct radeon_fence *r100_copy_blit(stru uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv) + struct reservation_object *resv) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; struct radeon_fence *fence; diff -rupN a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c --- a/drivers/gpu/drm/radeon/r200.c 2019-09-23 21:45:36.499568124 +0200 +++ b/drivers/gpu/drm/radeon/r200.c 2019-09-23 21:44:38.819735820 +0200 @@ -84,7 +84,7 @@ struct radeon_fence *r200_copy_dma(struc uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv) + struct reservation_object *resv) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; struct radeon_fence *fence; diff -rupN a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c --- a/drivers/gpu/drm/radeon/r600.c 2019-09-23 21:45:36.499568124 +0200 +++ b/drivers/gpu/drm/radeon/r600.c 2019-09-23 21:44:38.819735820 +0200 @@ -2963,7 +2963,7 @@ bool r600_semaphore_ring_emit(struct rad struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv) + struct reservation_object *resv) { struct radeon_fence *fence; struct radeon_sync sync; diff -rupN a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c --- a/drivers/gpu/drm/radeon/r600_dma.c 2019-09-23 21:45:36.499568124 +0200 +++ b/drivers/gpu/drm/radeon/r600_dma.c 2019-09-23 21:44:38.823735809 +0200 @@ -444,7 +444,7 @@ void r600_dma_ring_ib_execute(struct rad struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv) + struct reservation_object *resv) { struct radeon_fence *fence; struct radeon_sync sync; diff -rupN a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h --- a/drivers/gpu/drm/radeon/radeon_asic.h 2019-09-23 21:45:36.503568112 +0200 +++ b/drivers/gpu/drm/radeon/radeon_asic.h 2019-09-23 21:44:38.823735809 +0200 @@ -86,7 +86,7 @@ struct radeon_fence *r100_copy_blit(stru uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv); + struct reservation_object *resv); int r100_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t tiling_flags, uint32_t pitch, uint32_t offset, uint32_t obj_size); @@ -157,7 +157,7 @@ struct radeon_fence *r200_copy_dma(struc uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv); + struct reservation_object *resv); void r200_set_safe_registers(struct radeon_device *rdev); /* @@ -347,11 +347,11 @@ int r600_dma_ring_test(struct radeon_dev struct 
radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv); + struct reservation_object *resv); struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv); + struct reservation_object *resv); void r600_hpd_init(struct radeon_device *rdev); void r600_hpd_fini(struct radeon_device *rdev); bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); @@ -473,7 +473,7 @@ void r700_cp_fini(struct radeon_device * struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv); + struct reservation_object *resv); u32 rv770_get_xclk(struct radeon_device *rdev); int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); int rv770_get_temp(struct radeon_device *rdev); @@ -547,7 +547,7 @@ void evergreen_dma_ring_ib_execute(struc struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv); + struct reservation_object *resv); int evergreen_get_temp(struct radeon_device *rdev); int evergreen_get_allowed_info_register(struct radeon_device *rdev, u32 reg, u32 *val); @@ -725,7 +725,7 @@ int si_ib_parse(struct radeon_device *rd struct radeon_fence *si_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv); + struct reservation_object *resv); void si_dma_vm_copy_pages(struct radeon_device *rdev, struct radeon_ib *ib, @@ -796,11 +796,11 @@ void cik_sdma_ring_ib_execute(struct rad struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv); + struct reservation_object *resv); struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv); + struct reservation_object *resv); int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); diff -rupN a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c --- a/drivers/gpu/drm/radeon/radeon_benchmark.c 2019-09-23 21:45:36.503568112 +0200 +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c 2019-09-23 21:44:38.823735809 +0200 @@ -35,7 +35,7 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, uint64_t saddr, uint64_t daddr, int flag, int n, - struct dma_resv *resv) + struct reservation_object *resv) { unsigned long start_jiffies; unsigned long end_jiffies; @@ -122,7 +122,7 @@ static void radeon_benchmark_move(struct if (rdev->asic->copy.dma) { time = radeon_benchmark_do_move(rdev, size, saddr, daddr, RADEON_BENCHMARK_COPY_DMA, n, - dobj->tbo.base.resv); + dobj->tbo.resv); if (time < 0) goto out_cleanup; if (time > 0) @@ -133,7 +133,7 @@ static void radeon_benchmark_move(struct if (rdev->asic->copy.blit) { time = radeon_benchmark_do_move(rdev, size, saddr, daddr, RADEON_BENCHMARK_COPY_BLIT, n, - dobj->tbo.base.resv); + dobj->tbo.resv); if (time < 0) goto out_cleanup; if (time > 0) diff -rupN a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c --- 
a/drivers/gpu/drm/radeon/radeon_connectors.c 2019-09-23 21:45:36.503568112 +0200 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c 2019-09-23 21:44:38.823735809 +0200 @@ -752,7 +752,7 @@ static int radeon_connector_set_property radeon_encoder->output_csc = val; - if (connector->encoder && connector->encoder->crtc) { + if (connector->encoder->crtc) { struct drm_crtc *crtc = connector->encoder->crtc; struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); diff -rupN a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c --- a/drivers/gpu/drm/radeon/radeon_cs.c 2019-09-23 21:45:36.503568112 +0200 +++ b/drivers/gpu/drm/radeon/radeon_cs.c 2019-09-23 21:44:38.823735809 +0200 @@ -255,9 +255,9 @@ static int radeon_cs_sync_rings(struct r int r; list_for_each_entry(reloc, &p->validated, tv.head) { - struct dma_resv *resv; + struct reservation_object *resv; - resv = reloc->robj->tbo.base.resv; + resv = reloc->robj->tbo.resv; r = radeon_sync_resv(p->rdev, &p->ib.sync, resv, reloc->tv.num_shared); if (r) @@ -443,7 +443,7 @@ static void radeon_cs_parser_fini(struct if (bo == NULL) continue; - drm_gem_object_put_unlocked(&bo->tbo.base); + drm_gem_object_put_unlocked(&bo->gem_base); } } kfree(parser->track); diff -rupN a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c --- a/drivers/gpu/drm/radeon/radeon_device.c 2019-09-23 21:45:36.507568100 +0200 +++ b/drivers/gpu/drm/radeon/radeon_device.c 2019-09-23 21:44:38.823735809 +0200 @@ -1363,27 +1363,34 @@ int radeon_device_init(struct radeon_dev else rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */ - /* set DMA mask. + /* set DMA mask + need_dma32 flags. * PCIE - can handle 40-bits. * IGP - can handle 40-bits * AGP - generally dma32 is safest * PCI - dma32 for legacy pci gart, 40 bits on newer asics */ - dma_bits = 40; + rdev->need_dma32 = false; if (rdev->flags & RADEON_IS_AGP) - dma_bits = 32; + rdev->need_dma32 = true; if ((rdev->flags & RADEON_IS_PCI) && (rdev->family <= CHIP_RS740)) - dma_bits = 32; + rdev->need_dma32 = true; #ifdef CONFIG_PPC64 if (rdev->family == CHIP_CEDAR) - dma_bits = 32; + rdev->need_dma32 = true; #endif - r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits)); + dma_bits = rdev->need_dma32 ? 
32 : 40; + r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); if (r) { + rdev->need_dma32 = true; + dma_bits = 32; pr_warn("radeon: No suitable DMA available\n"); - return r; + } + r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); + if (r) { + pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); + pr_warn("radeon: No coherent DMA available\n"); } rdev->need_swiotlb = drm_need_swiotlb(dma_bits); diff -rupN a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c --- a/drivers/gpu/drm/radeon/radeon_display.c 2019-09-23 21:45:36.507568100 +0200 +++ b/drivers/gpu/drm/radeon/radeon_display.c 2019-09-23 21:44:38.823735809 +0200 @@ -275,7 +275,7 @@ static void radeon_unpin_work_func(struc } else DRM_ERROR("failed to reserve buffer after flip\n"); - drm_gem_object_put_unlocked(&work->old_rbo->tbo.base); + drm_gem_object_put_unlocked(&work->old_rbo->gem_base); kfree(work); } @@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target( DRM_ERROR("failed to pin new rbo buffer before flip\n"); goto cleanup; } - work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv)); + work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); radeon_bo_unreserve(new_rbo); @@ -607,7 +607,7 @@ pflip_cleanup: radeon_bo_unreserve(new_rbo); cleanup: - drm_gem_object_put_unlocked(&work->old_rbo->tbo.base); + drm_gem_object_put_unlocked(&work->old_rbo->gem_base); dma_fence_put(work->fence); kfree(work); return r; diff -rupN a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c --- a/drivers/gpu/drm/radeon/radeon_drv.c 2019-09-23 21:45:36.507568100 +0200 +++ b/drivers/gpu/drm/radeon/radeon_drv.c 2019-09-23 21:44:38.823735809 +0200 @@ -131,7 +131,8 @@ int radeon_gem_object_open(struct drm_ge struct drm_file *file_priv); void radeon_gem_object_close(struct drm_gem_object *obj, struct drm_file *file_priv); -struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj, +struct dma_buf *radeon_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *gobj, int flags); extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc, unsigned int flags, int *vpos, int *hpos, @@ -153,6 +154,7 @@ struct drm_gem_object *radeon_gem_prime_ struct sg_table *sg); int radeon_gem_prime_pin(struct drm_gem_object *obj); void radeon_gem_prime_unpin(struct drm_gem_object *obj); +struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *); void *radeon_gem_prime_vmap(struct drm_gem_object *obj); void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); @@ -348,30 +350,24 @@ radeon_pci_remove(struct pci_dev *pdev) static void radeon_pci_shutdown(struct pci_dev *pdev) { - struct drm_device *ddev = pci_get_drvdata(pdev); - /* if we are running in a VM, make sure the device * torn down properly on reboot/shutdown */ if (radeon_device_is_virtual()) radeon_pci_remove(pdev); - - /* Some adapters need to be suspended before a - * shutdown occurs in order to prevent an error - * during kexec. 
- */ - radeon_suspend_kms(ddev, true, true, false); } static int radeon_pmops_suspend(struct device *dev) { - struct drm_device *drm_dev = dev_get_drvdata(dev); + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); return radeon_suspend_kms(drm_dev, true, true, false); } static int radeon_pmops_resume(struct device *dev) { - struct drm_device *drm_dev = dev_get_drvdata(dev); + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); /* GPU comes up enabled by the bios on resume */ if (radeon_is_px(drm_dev)) { @@ -385,13 +381,15 @@ static int radeon_pmops_resume(struct de static int radeon_pmops_freeze(struct device *dev) { - struct drm_device *drm_dev = dev_get_drvdata(dev); + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); return radeon_suspend_kms(drm_dev, false, true, true); } static int radeon_pmops_thaw(struct device *dev) { - struct drm_device *drm_dev = dev_get_drvdata(dev); + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); return radeon_resume_kms(drm_dev, false, true); } @@ -450,7 +448,8 @@ static int radeon_pmops_runtime_resume(s static int radeon_pmops_runtime_idle(struct device *dev) { - struct drm_device *drm_dev = dev_get_drvdata(dev); + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); struct drm_crtc *crtc; if (!radeon_is_px(drm_dev)) { @@ -541,7 +540,7 @@ radeon_get_crtc_scanout_position(struct static struct drm_driver kms_driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_GEM | DRIVER_RENDER, + DRIVER_USE_AGP | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER, .load = radeon_driver_load_kms, .open = radeon_driver_open_kms, .postclose = radeon_driver_postclose_kms, @@ -567,8 +566,10 @@ static struct drm_driver kms_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = radeon_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, .gem_prime_pin = radeon_gem_prime_pin, .gem_prime_unpin = radeon_gem_prime_unpin, + .gem_prime_res_obj = radeon_gem_prime_res_obj, .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table, .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table, .gem_prime_vmap = radeon_gem_prime_vmap, diff -rupN a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c --- a/drivers/gpu/drm/radeon/radeon_gem.c 2019-09-23 21:45:36.507568100 +0200 +++ b/drivers/gpu/drm/radeon/radeon_gem.c 2019-09-23 21:44:38.823735809 +0200 @@ -83,7 +83,7 @@ retry: } return r; } - *obj = &robj->tbo.base; + *obj = &robj->gem_base; robj->pid = task_pid_nr(current); mutex_lock(&rdev->gem.mutex); @@ -114,7 +114,7 @@ static int radeon_gem_set_domain(struct } if (domain == RADEON_GEM_DOMAIN_CPU) { /* Asking for cpu access wait for object idle */ - r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); + r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ); if (!r) r = -EBUSY; @@ -449,7 +449,7 @@ int radeon_gem_busy_ioctl(struct drm_dev } robj = gem_to_radeon_bo(gobj); - r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true); + r = reservation_object_test_signaled_rcu(robj->tbo.resv, true); if (r == 0) r = -EBUSY; else @@ -478,7 +478,7 @@ int radeon_gem_wait_idle_ioctl(struct dr } robj = gem_to_radeon_bo(gobj); - ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); + ret = 
reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ); if (ret == 0) r = -EBUSY; else if (ret < 0) diff -rupN a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h --- a/drivers/gpu/drm/radeon/radeon.h 2019-09-23 21:45:36.503568112 +0200 +++ b/drivers/gpu/drm/radeon/radeon.h 2019-09-23 21:44:38.823735809 +0200 @@ -505,6 +505,7 @@ struct radeon_bo { struct list_head va; /* Constant after initialization */ struct radeon_device *rdev; + struct drm_gem_object gem_base; struct ttm_bo_kmap_obj dma_buf_vmap; pid_t pid; @@ -512,7 +513,7 @@ struct radeon_bo { struct radeon_mn *mn; struct list_head mn_list; }; -#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base) +#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) int radeon_gem_debugfs_init(struct radeon_device *rdev); @@ -619,7 +620,7 @@ void radeon_sync_fence(struct radeon_syn struct radeon_fence *fence); int radeon_sync_resv(struct radeon_device *rdev, struct radeon_sync *sync, - struct dma_resv *resv, + struct reservation_object *resv, bool shared); int radeon_sync_rings(struct radeon_device *rdev, struct radeon_sync *sync, @@ -1912,20 +1913,20 @@ struct radeon_asic { uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv); + struct reservation_object *resv); u32 blit_ring_index; struct radeon_fence *(*dma)(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv); + struct reservation_object *resv); u32 dma_ring_index; /* method used for bo copy */ struct radeon_fence *(*copy)(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv); + struct reservation_object *resv); /* ring used for bo copies */ u32 copy_ring_index; } copy; @@ -2386,6 +2387,7 @@ struct radeon_device { struct radeon_wb wb; struct radeon_dummy_page dummy_page; bool shutdown; + bool need_dma32; bool need_swiotlb; bool accel_working; bool fastfb_working; /* IGP feature*/ diff -rupN a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c --- a/drivers/gpu/drm/radeon/radeon_mn.c 2019-09-23 21:45:36.507568100 +0200 +++ b/drivers/gpu/drm/radeon/radeon_mn.c 2019-09-23 21:44:38.823735809 +0200 @@ -105,7 +105,7 @@ static int radeon_mn_invalidate_range_st continue; } - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, + r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); diff -rupN a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c --- a/drivers/gpu/drm/radeon/radeon_object.c 2019-09-23 21:45:36.507568100 +0200 +++ b/drivers/gpu/drm/radeon/radeon_object.c 2019-09-23 21:44:38.823735809 +0200 @@ -85,9 +85,9 @@ static void radeon_ttm_bo_destroy(struct mutex_unlock(&bo->rdev->gem.mutex); radeon_bo_clear_surface_reg(bo); WARN_ON_ONCE(!list_empty(&bo->va)); - if (bo->tbo.base.import_attach) - drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg); - drm_gem_object_release(&bo->tbo.base); + if (bo->gem_base.import_attach) + drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg); + drm_gem_object_release(&bo->gem_base); kfree(bo); } @@ -183,7 +183,7 @@ void radeon_ttm_placement_from_domain(st int radeon_bo_create(struct radeon_device *rdev, unsigned long size, int byte_align, bool kernel, u32 domain, u32 flags, struct sg_table *sg, - struct dma_resv *resv, + struct reservation_object *resv, struct radeon_bo 
**bo_ptr) { struct radeon_bo *bo; @@ -209,7 +209,7 @@ int radeon_bo_create(struct radeon_devic bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; - drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size); + drm_gem_private_object_init(rdev->ddev, &bo->gem_base, size); bo->rdev = rdev; bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); @@ -442,13 +442,13 @@ void radeon_bo_force_delete(struct radeo dev_err(rdev->dev, "Userspace still has active objects !\n"); list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { dev_err(rdev->dev, "%p %p %lu %lu force free\n", - &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size, - *((unsigned long *)&bo->tbo.base.refcount)); + &bo->gem_base, bo, (unsigned long)bo->gem_base.size, + *((unsigned long *)&bo->gem_base.refcount)); mutex_lock(&bo->rdev->gem.mutex); list_del_init(&bo->list); mutex_unlock(&bo->rdev->gem.mutex); /* this should unref the ttm bo */ - drm_gem_object_put_unlocked(&bo->tbo.base); + drm_gem_object_put_unlocked(&bo->gem_base); } } @@ -610,7 +610,7 @@ int radeon_bo_get_surface_reg(struct rad int steal; int i; - dma_resv_assert_held(bo->tbo.base.resv); + lockdep_assert_held(&bo->tbo.resv->lock.base); if (!bo->tiling_flags) return 0; @@ -736,7 +736,7 @@ void radeon_bo_get_tiling_flags(struct r uint32_t *tiling_flags, uint32_t *pitch) { - dma_resv_assert_held(bo->tbo.base.resv); + lockdep_assert_held(&bo->tbo.resv->lock.base); if (tiling_flags) *tiling_flags = bo->tiling_flags; @@ -748,7 +748,7 @@ int radeon_bo_check_tiling(struct radeon bool force_drop) { if (!force_drop) - dma_resv_assert_held(bo->tbo.base.resv); + lockdep_assert_held(&bo->tbo.resv->lock.base); if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) return 0; @@ -870,10 +870,10 @@ int radeon_bo_wait(struct radeon_bo *bo, void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, bool shared) { - struct dma_resv *resv = bo->tbo.base.resv; + struct reservation_object *resv = bo->tbo.resv; if (shared) - dma_resv_add_shared_fence(resv, &fence->base); + reservation_object_add_shared_fence(resv, &fence->base); else - dma_resv_add_excl_fence(resv, &fence->base); + reservation_object_add_excl_fence(resv, &fence->base); } diff -rupN a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h --- a/drivers/gpu/drm/radeon/radeon_object.h 2019-09-23 21:45:36.507568100 +0200 +++ b/drivers/gpu/drm/radeon/radeon_object.h 2019-09-23 21:44:38.823735809 +0200 @@ -116,7 +116,7 @@ static inline unsigned radeon_bo_gpu_pag */ static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo) { - return drm_vma_node_offset_addr(&bo->tbo.base.vma_node); + return drm_vma_node_offset_addr(&bo->tbo.vma_node); } extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, @@ -126,7 +126,7 @@ extern int radeon_bo_create(struct radeo unsigned long size, int byte_align, bool kernel, u32 domain, u32 flags, struct sg_table *sg, - struct dma_resv *resv, + struct reservation_object *resv, struct radeon_bo **bo_ptr); extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); extern void radeon_bo_kunmap(struct radeon_bo *bo); diff -rupN a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c --- a/drivers/gpu/drm/radeon/radeon_prime.c 2019-09-23 21:45:36.507568100 +0200 +++ b/drivers/gpu/drm/radeon/radeon_prime.c 2019-09-23 21:44:38.827735797 +0200 @@ -63,15 +63,15 @@ struct drm_gem_object *radeon_gem_prime_ struct dma_buf_attachment *attach, struct sg_table *sg) { - struct dma_resv *resv = attach->dmabuf->resv; + 
struct reservation_object *resv = attach->dmabuf->resv; struct radeon_device *rdev = dev->dev_private; struct radeon_bo *bo; int ret; - dma_resv_lock(resv, NULL); + ww_mutex_lock(&resv->lock, NULL); ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false, RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo); - dma_resv_unlock(resv); + ww_mutex_unlock(&resv->lock); if (ret) return ERR_PTR(ret); @@ -80,7 +80,7 @@ struct drm_gem_object *radeon_gem_prime_ mutex_unlock(&rdev->gem.mutex); bo->prime_shared_count = 1; - return &bo->tbo.base; + return &bo->gem_base; } int radeon_gem_prime_pin(struct drm_gem_object *obj) @@ -117,11 +117,19 @@ void radeon_gem_prime_unpin(struct drm_g } -struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj, +struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj) +{ + struct radeon_bo *bo = gem_to_radeon_bo(obj); + + return bo->tbo.resv; +} + +struct dma_buf *radeon_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *gobj, int flags) { struct radeon_bo *bo = gem_to_radeon_bo(gobj); if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) return ERR_PTR(-EPERM); - return drm_gem_prime_export(gobj, flags); + return drm_gem_prime_export(dev, gobj, flags); } diff -rupN a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c --- a/drivers/gpu/drm/radeon/radeon_sync.c 2019-09-23 21:45:36.511568088 +0200 +++ b/drivers/gpu/drm/radeon/radeon_sync.c 2019-09-23 21:44:38.827735797 +0200 @@ -87,30 +87,30 @@ void radeon_sync_fence(struct radeon_syn */ int radeon_sync_resv(struct radeon_device *rdev, struct radeon_sync *sync, - struct dma_resv *resv, + struct reservation_object *resv, bool shared) { - struct dma_resv_list *flist; + struct reservation_object_list *flist; struct dma_fence *f; struct radeon_fence *fence; unsigned i; int r = 0; /* always sync to the exclusive fence */ - f = dma_resv_get_excl(resv); + f = reservation_object_get_excl(resv); fence = f ? 
to_radeon_fence(f) : NULL; if (fence && fence->rdev == rdev) radeon_sync_fence(sync, fence); else if (f) r = dma_fence_wait(f, true); - flist = dma_resv_get_list(resv); + flist = reservation_object_get_list(resv); if (shared || !flist || r) return r; for (i = 0; i < flist->shared_count; ++i) { f = rcu_dereference_protected(flist->shared[i], - dma_resv_held(resv)); + reservation_object_held(resv)); fence = to_radeon_fence(f); if (fence && fence->rdev == rdev) radeon_sync_fence(sync, fence); diff -rupN a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c --- a/drivers/gpu/drm/radeon/radeon_test.c 2019-09-23 21:45:36.511568088 +0200 +++ b/drivers/gpu/drm/radeon/radeon_test.c 2019-09-23 21:44:38.827735797 +0200 @@ -120,11 +120,11 @@ static void radeon_do_test_moves(struct if (ring == R600_RING_TYPE_DMA_INDEX) fence = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, - vram_obj->tbo.base.resv); + vram_obj->tbo.resv); else fence = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, - vram_obj->tbo.base.resv); + vram_obj->tbo.resv); if (IS_ERR(fence)) { DRM_ERROR("Failed GTT->VRAM copy %d\n", i); r = PTR_ERR(fence); @@ -171,11 +171,11 @@ static void radeon_do_test_moves(struct if (ring == R600_RING_TYPE_DMA_INDEX) fence = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, - vram_obj->tbo.base.resv); + vram_obj->tbo.resv); else fence = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, - vram_obj->tbo.base.resv); + vram_obj->tbo.resv); if (IS_ERR(fence)) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); r = PTR_ERR(fence); diff -rupN a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c --- a/drivers/gpu/drm/radeon/radeon_ttm.c 2019-09-23 21:45:36.511568088 +0200 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c 2019-09-23 21:44:38.827735797 +0200 @@ -184,7 +184,7 @@ static int radeon_verify_access(struct t if (radeon_ttm_tt_has_userptr(bo->ttm)) return -EPERM; - return drm_vma_node_verify_access(&rbo->tbo.base.vma_node, + return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp->private_data); } @@ -244,7 +244,7 @@ static int radeon_move_blit(struct ttm_b BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); - fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv); + fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv); if (IS_ERR(fence)) return PTR_ERR(fence); @@ -794,7 +794,7 @@ int radeon_ttm_init(struct radeon_device r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->ddev->anon_inode->i_mapping, - dma_addressing_limited(&rdev->pdev->dev)); + rdev->need_dma32); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; diff -rupN a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c --- a/drivers/gpu/drm/radeon/radeon_uvd.c 2019-09-23 21:45:36.511568088 +0200 +++ b/drivers/gpu/drm/radeon/radeon_uvd.c 2019-09-23 21:44:38.827735797 +0200 @@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct rade return -EINVAL; } - f = dma_resv_get_excl(bo->tbo.base.resv); + f = reservation_object_get_excl(bo->tbo.resv); if (f) { r = radeon_fence_wait((struct radeon_fence *)f, false); if (r) { diff -rupN a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c --- a/drivers/gpu/drm/radeon/radeon_vm.c 2019-09-23 21:45:36.511568088 +0200 +++ b/drivers/gpu/drm/radeon/radeon_vm.c 2019-09-23 
21:44:38.827735797 +0200 @@ -702,7 +702,7 @@ int radeon_vm_update_page_directory(stru if (ib.length_dw != 0) { radeon_asic_vm_pad_ib(rdev, &ib); - radeon_sync_resv(rdev, &ib.sync, pd->tbo.base.resv, true); + radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true); WARN_ON(ib.length_dw > ndw); r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) { @@ -830,8 +830,8 @@ static int radeon_vm_update_ptes(struct uint64_t pte; int r; - radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true); - r = dma_resv_reserve_shared(pt->tbo.base.resv, 1); + radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true); + r = reservation_object_reserve_shared(pt->tbo.resv, 1); if (r) return r; diff -rupN a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c --- a/drivers/gpu/drm/radeon/rv770_dma.c 2019-09-23 21:45:36.515568076 +0200 +++ b/drivers/gpu/drm/radeon/rv770_dma.c 2019-09-23 21:44:38.827735797 +0200 @@ -42,7 +42,7 @@ struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv) + struct reservation_object *resv) { struct radeon_fence *fence; struct radeon_sync sync; diff -rupN a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c --- a/drivers/gpu/drm/radeon/si_dma.c 2019-09-23 21:45:36.515568076 +0200 +++ b/drivers/gpu/drm/radeon/si_dma.c 2019-09-23 21:44:38.827735797 +0200 @@ -231,7 +231,7 @@ void si_dma_vm_flush(struct radeon_devic struct radeon_fence *si_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct dma_resv *resv) + struct reservation_object *resv) { struct radeon_fence *fence; struct radeon_sync sync; diff -rupN a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c --- a/drivers/gpu/drm/ttm/ttm_bo.c 2019-09-23 21:45:36.543567991 +0200 +++ b/drivers/gpu/drm/ttm/ttm_bo.c 2019-09-23 21:44:38.827735797 +0200 @@ -41,7 +41,7 @@ #include #include #include -#include +#include static void ttm_bo_global_kobj_release(struct kobject *kobj); @@ -160,8 +160,7 @@ static void ttm_bo_release_list(struct k ttm_tt_destroy(bo->ttm); atomic_dec(&bo->bdev->glob->bo_count); dma_fence_put(bo->moving); - if (!ttm_bo_uses_embedded_gem_object(bo)) - dma_resv_fini(&bo->base._resv); + reservation_object_fini(&bo->ttm_resv); mutex_destroy(&bo->wu_mutex); bo->destroy(bo); ttm_mem_global_free(bdev->glob->mem_glob, acc_size); @@ -173,7 +172,7 @@ static void ttm_bo_add_mem_to_lru(struct struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man; - dma_resv_assert_held(bo->base.resv); + reservation_object_assert_held(bo->resv); if (!list_empty(&bo->lru)) return; @@ -244,7 +243,7 @@ static void ttm_bo_bulk_move_set_pos(str void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, struct ttm_lru_bulk_move *bulk) { - dma_resv_assert_held(bo->base.resv); + reservation_object_assert_held(bo->resv); ttm_bo_del_from_lru(bo); ttm_bo_add_to_lru(bo); @@ -277,8 +276,8 @@ void ttm_bo_bulk_move_lru_tail(struct tt if (!pos->first) continue; - dma_resv_assert_held(pos->first->base.resv); - dma_resv_assert_held(pos->last->base.resv); + reservation_object_assert_held(pos->first->resv); + reservation_object_assert_held(pos->last->resv); man = &pos->first->bdev->man[TTM_PL_TT]; list_bulk_move_tail(&man->lru[i], &pos->first->lru, @@ -292,8 +291,8 @@ void ttm_bo_bulk_move_lru_tail(struct tt if (!pos->first) continue; - dma_resv_assert_held(pos->first->base.resv); - dma_resv_assert_held(pos->last->base.resv); + 
reservation_object_assert_held(pos->first->resv); + reservation_object_assert_held(pos->last->resv); man = &pos->first->bdev->man[TTM_PL_VRAM]; list_bulk_move_tail(&man->lru[i], &pos->first->lru, @@ -307,8 +306,8 @@ void ttm_bo_bulk_move_lru_tail(struct tt if (!pos->first) continue; - dma_resv_assert_held(pos->first->base.resv); - dma_resv_assert_held(pos->last->base.resv); + reservation_object_assert_held(pos->first->resv); + reservation_object_assert_held(pos->last->resv); lru = &pos->first->bdev->glob->swap_lru[i]; list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap); @@ -439,32 +438,32 @@ static int ttm_bo_individualize_resv(str { int r; - if (bo->base.resv == &bo->base._resv) + if (bo->resv == &bo->ttm_resv) return 0; - BUG_ON(!dma_resv_trylock(&bo->base._resv)); + BUG_ON(!reservation_object_trylock(&bo->ttm_resv)); - r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv); + r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv); if (r) - dma_resv_unlock(&bo->base._resv); + reservation_object_unlock(&bo->ttm_resv); return r; } static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) { - struct dma_resv_list *fobj; + struct reservation_object_list *fobj; struct dma_fence *fence; int i; - fobj = dma_resv_get_list(&bo->base._resv); - fence = dma_resv_get_excl(&bo->base._resv); + fobj = reservation_object_get_list(&bo->ttm_resv); + fence = reservation_object_get_excl(&bo->ttm_resv); if (fence && !fence->ops->signaled) dma_fence_enable_sw_signaling(fence); for (i = 0; fobj && i < fobj->shared_count; ++i) { fence = rcu_dereference_protected(fobj->shared[i], - dma_resv_held(bo->base.resv)); + reservation_object_held(bo->resv)); if (!fence->ops->signaled) dma_fence_enable_sw_signaling(fence); @@ -482,23 +481,23 @@ static void ttm_bo_cleanup_refs_or_queue /* Last resort, if we fail to allocate memory for the * fences block for the BO to become idle */ - dma_resv_wait_timeout_rcu(bo->base.resv, true, false, + reservation_object_wait_timeout_rcu(bo->resv, true, false, 30 * HZ); spin_lock(&glob->lru_lock); goto error; } spin_lock(&glob->lru_lock); - ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY; + ret = reservation_object_trylock(bo->resv) ? 
0 : -EBUSY; if (!ret) { - if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) { + if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) { ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); - if (bo->base.resv != &bo->base._resv) - dma_resv_unlock(&bo->base._resv); + if (bo->resv != &bo->ttm_resv) + reservation_object_unlock(&bo->ttm_resv); ttm_bo_cleanup_memtype_use(bo); - dma_resv_unlock(bo->base.resv); + reservation_object_unlock(bo->resv); return; } @@ -514,10 +513,10 @@ static void ttm_bo_cleanup_refs_or_queue ttm_bo_add_to_lru(bo); } - dma_resv_unlock(bo->base.resv); + reservation_object_unlock(bo->resv); } - if (bo->base.resv != &bo->base._resv) - dma_resv_unlock(&bo->base._resv); + if (bo->resv != &bo->ttm_resv) + reservation_object_unlock(&bo->ttm_resv); error: kref_get(&bo->list_kref); @@ -546,15 +545,15 @@ static int ttm_bo_cleanup_refs(struct tt bool unlock_resv) { struct ttm_bo_global *glob = bo->bdev->glob; - struct dma_resv *resv; + struct reservation_object *resv; int ret; if (unlikely(list_empty(&bo->ddestroy))) - resv = bo->base.resv; + resv = bo->resv; else - resv = &bo->base._resv; + resv = &bo->ttm_resv; - if (dma_resv_test_signaled_rcu(resv, true)) + if (reservation_object_test_signaled_rcu(resv, true)) ret = 0; else ret = -EBUSY; @@ -563,10 +562,10 @@ static int ttm_bo_cleanup_refs(struct tt long lret; if (unlock_resv) - dma_resv_unlock(bo->base.resv); + reservation_object_unlock(bo->resv); spin_unlock(&glob->lru_lock); - lret = dma_resv_wait_timeout_rcu(resv, true, + lret = reservation_object_wait_timeout_rcu(resv, true, interruptible, 30 * HZ); @@ -576,7 +575,7 @@ static int ttm_bo_cleanup_refs(struct tt return -EBUSY; spin_lock(&glob->lru_lock); - if (unlock_resv && !dma_resv_trylock(bo->base.resv)) { + if (unlock_resv && !reservation_object_trylock(bo->resv)) { /* * We raced, and lost, someone else holds the reservation now, * and is probably busy in ttm_bo_cleanup_memtype_use. 
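
For reference, the hunks above restore the trylock/test/wait pattern on struct reservation_object in TTM's delayed-destroy path. A minimal, hedged sketch of that pre-rename API is shown below; it is not part of the patch, and my_bo_is_idle() plus the 30-second timeout are invented for the example:

#include <linux/jiffies.h>
#include <linux/reservation.h>

/* Hypothetical helper, for illustration only. */
static int my_bo_is_idle(struct reservation_object *resv, bool interruptible)
{
	long lret;

	/* Fast path: peek at the fences without blocking. */
	if (reservation_object_test_signaled_rcu(resv, true))
		return 0;

	/* Slow path: wait for all shared and the exclusive fence. */
	lret = reservation_object_wait_timeout_rcu(resv, true, interruptible,
						   30 * HZ);
	if (lret < 0)
		return lret;		/* e.g. -ERESTARTSYS */

	return lret ? 0 : -EBUSY;	/* 0 jiffies left means timeout */
}
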
@@ -593,7 +592,7 @@ static int ttm_bo_cleanup_refs(struct tt if (ret || unlikely(list_empty(&bo->ddestroy))) { if (unlock_resv) - dma_resv_unlock(bo->base.resv); + reservation_object_unlock(bo->resv); spin_unlock(&glob->lru_lock); return ret; } @@ -606,7 +605,7 @@ static int ttm_bo_cleanup_refs(struct tt ttm_bo_cleanup_memtype_use(bo); if (unlock_resv) - dma_resv_unlock(bo->base.resv); + reservation_object_unlock(bo->resv); return 0; } @@ -632,14 +631,14 @@ static bool ttm_bo_delayed_delete(struct kref_get(&bo->list_kref); list_move_tail(&bo->ddestroy, &removed); - if (remove_all || bo->base.resv != &bo->base._resv) { + if (remove_all || bo->resv != &bo->ttm_resv) { spin_unlock(&glob->lru_lock); - dma_resv_lock(bo->base.resv, NULL); + reservation_object_lock(bo->resv, NULL); spin_lock(&glob->lru_lock); ttm_bo_cleanup_refs(bo, false, !remove_all, true); - } else if (dma_resv_trylock(bo->base.resv)) { + } else if (reservation_object_trylock(bo->resv)) { ttm_bo_cleanup_refs(bo, false, !remove_all, true); } else { spin_unlock(&glob->lru_lock); @@ -672,10 +671,7 @@ static void ttm_bo_release(struct kref * struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; - if (bo->bdev->driver->release_notify) - bo->bdev->driver->release_notify(bo); - - drm_vma_offset_remove(&bdev->vma_manager, &bo->base.vma_node); + drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node); ttm_mem_io_lock(man, false); ttm_mem_io_free_vm(bo); ttm_mem_io_unlock(man); @@ -711,7 +707,7 @@ static int ttm_bo_evict(struct ttm_buffe struct ttm_placement placement; int ret = 0; - dma_resv_assert_held(bo->base.resv); + reservation_object_assert_held(bo->resv); placement.num_placement = 0; placement.num_busy_placement = 0; @@ -781,8 +777,8 @@ static bool ttm_bo_evict_swapout_allowab { bool ret = false; - if (bo->base.resv == ctx->resv) { - dma_resv_assert_held(bo->base.resv); + if (bo->resv == ctx->resv) { + reservation_object_assert_held(bo->resv); if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT || !list_empty(&bo->ddestroy)) ret = true; @@ -790,7 +786,7 @@ static bool ttm_bo_evict_swapout_allowab if (busy) *busy = false; } else { - ret = dma_resv_trylock(bo->base.resv); + ret = reservation_object_trylock(bo->resv); *locked = ret; if (busy) *busy = !ret; @@ -818,10 +814,10 @@ static int ttm_mem_evict_wait_busy(struc return -EBUSY; if (ctx->interruptible) - r = dma_resv_lock_interruptible(busy_bo->base.resv, + r = reservation_object_lock_interruptible(busy_bo->resv, ticket); else - r = dma_resv_lock(busy_bo->base.resv, ticket); + r = reservation_object_lock(busy_bo->resv, ticket); /* * TODO: It would be better to keep the BO locked until allocation is at @@ -829,7 +825,7 @@ static int ttm_mem_evict_wait_busy(struc * of TTM. */ if (!r) - dma_resv_unlock(busy_bo->base.resv); + reservation_object_unlock(busy_bo->resv); return r == -EDEADLK ? 
-EBUSY : r; } @@ -854,8 +850,8 @@ static int ttm_mem_evict_first(struct tt if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, &busy)) { - if (busy && !busy_bo && ticket != - dma_resv_locking_ctx(bo->base.resv)) + if (busy && !busy_bo && + bo->resv->lock.ctx != ticket) busy_bo = bo; continue; } @@ -863,7 +859,7 @@ static int ttm_mem_evict_first(struct tt if (place && !bdev->driver->eviction_valuable(bo, place)) { if (locked) - dma_resv_unlock(bo->base.resv); + reservation_object_unlock(bo->resv); continue; } break; @@ -935,9 +931,9 @@ static int ttm_bo_add_move_fence(struct spin_unlock(&man->move_lock); if (fence) { - dma_resv_add_shared_fence(bo->base.resv, fence); + reservation_object_add_shared_fence(bo->resv, fence); - ret = dma_resv_reserve_shared(bo->base.resv, 1); + ret = reservation_object_reserve_shared(bo->resv, 1); if (unlikely(ret)) { dma_fence_put(fence); return ret; @@ -961,10 +957,8 @@ static int ttm_bo_mem_force_space(struct { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; - struct ww_acquire_ctx *ticket; int ret; - ticket = dma_resv_locking_ctx(bo->base.resv); do { ret = (*man->func->get_node)(man, bo, place, mem); if (unlikely(ret != 0)) @@ -972,7 +966,7 @@ static int ttm_bo_mem_force_space(struct if (mem->mm_node) break; ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx, - ticket); + bo->resv->lock.ctx); if (unlikely(ret != 0)) return ret; } while (1); @@ -1094,7 +1088,7 @@ int ttm_bo_mem_space(struct ttm_buffer_o bool type_found = false; int i, ret; - ret = dma_resv_reserve_shared(bo->base.resv, 1); + ret = reservation_object_reserve_shared(bo->resv, 1); if (unlikely(ret)) return ret; @@ -1175,7 +1169,7 @@ static int ttm_bo_move_buffer(struct ttm int ret = 0; struct ttm_mem_reg mem; - dma_resv_assert_held(bo->base.resv); + reservation_object_assert_held(bo->resv); mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; @@ -1245,7 +1239,7 @@ int ttm_bo_validate(struct ttm_buffer_ob int ret; uint32_t new_flags; - dma_resv_assert_held(bo->base.resv); + reservation_object_assert_held(bo->resv); /* * Check whether we need to move buffer. */ @@ -1282,7 +1276,7 @@ int ttm_bo_init_reserved(struct ttm_bo_d struct ttm_operation_ctx *ctx, size_t acc_size, struct sg_table *sg, - struct dma_resv *resv, + struct reservation_object *resv, void (*destroy) (struct ttm_buffer_object *)) { int ret = 0; @@ -1335,20 +1329,14 @@ int ttm_bo_init_reserved(struct ttm_bo_d bo->acc_size = acc_size; bo->sg = sg; if (resv) { - bo->base.resv = resv; - dma_resv_assert_held(bo->base.resv); + bo->resv = resv; + reservation_object_assert_held(bo->resv); } else { - bo->base.resv = &bo->base._resv; - } - if (!ttm_bo_uses_embedded_gem_object(bo)) { - /* - * bo.gem is not initialized, so we have to setup the - * struct elements we want use regardless. - */ - dma_resv_init(&bo->base._resv); - drm_vma_node_reset(&bo->base.vma_node); + bo->resv = &bo->ttm_resv; } + reservation_object_init(&bo->ttm_resv); atomic_inc(&bo->bdev->glob->bo_count); + drm_vma_node_reset(&bo->vma_node); /* * For ttm_bo_type_device buffers, allocate @@ -1356,14 +1344,14 @@ int ttm_bo_init_reserved(struct ttm_bo_d */ if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) - ret = drm_vma_offset_add(&bdev->vma_manager, &bo->base.vma_node, + ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node, bo->mem.num_pages); /* passed reservation objects should already be locked, * since otherwise lockdep will be angered in radeon. 
*/ if (!resv) { - locked = dma_resv_trylock(bo->base.resv); + locked = reservation_object_trylock(bo->resv); WARN_ON(!locked); } @@ -1397,7 +1385,7 @@ int ttm_bo_init(struct ttm_bo_device *bd bool interruptible, size_t acc_size, struct sg_table *sg, - struct dma_resv *resv, + struct reservation_object *resv, void (*destroy) (struct ttm_buffer_object *)) { struct ttm_operation_ctx ctx = { interruptible, false }; @@ -1784,7 +1772,7 @@ void ttm_bo_unmap_virtual_locked(struct { struct ttm_bo_device *bdev = bo->bdev; - drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); + drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping); ttm_mem_io_free_vm(bo); } @@ -1807,13 +1795,13 @@ int ttm_bo_wait(struct ttm_buffer_object long timeout = 15 * HZ; if (no_wait) { - if (dma_resv_test_signaled_rcu(bo->base.resv, true)) + if (reservation_object_test_signaled_rcu(bo->resv, true)) return 0; else return -EBUSY; } - timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true, + timeout = reservation_object_wait_timeout_rcu(bo->resv, true, interruptible, timeout); if (timeout < 0) return timeout; @@ -1821,7 +1809,7 @@ int ttm_bo_wait(struct ttm_buffer_object if (timeout == 0) return -EBUSY; - dma_resv_add_excl_fence(bo->base.resv, NULL); + reservation_object_add_excl_fence(bo->resv, NULL); return 0; } EXPORT_SYMBOL(ttm_bo_wait); @@ -1937,7 +1925,7 @@ out: * already swapped buffer. */ if (locked) - dma_resv_unlock(bo->base.resv); + reservation_object_unlock(bo->resv); kref_put(&bo->list_kref, ttm_bo_release_list); return ret; } @@ -1975,14 +1963,14 @@ int ttm_bo_wait_unreserved(struct ttm_bu ret = mutex_lock_interruptible(&bo->wu_mutex); if (unlikely(ret != 0)) return -ERESTARTSYS; - if (!dma_resv_is_locked(bo->base.resv)) + if (!ww_mutex_is_locked(&bo->resv->lock)) goto out_unlock; - ret = dma_resv_lock_interruptible(bo->base.resv, NULL); + ret = reservation_object_lock_interruptible(bo->resv, NULL); if (ret == -EINTR) ret = -ERESTARTSYS; if (unlikely(ret != 0)) goto out_unlock; - dma_resv_unlock(bo->base.resv); + reservation_object_unlock(bo->resv); out_unlock: mutex_unlock(&bo->wu_mutex); diff -rupN a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c --- a/drivers/gpu/drm/ttm/ttm_bo_util.c 2019-09-23 21:45:36.543567991 +0200 +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c 2019-09-23 21:44:38.827735797 +0200 @@ -38,7 +38,7 @@ #include #include #include -#include +#include struct ttm_transfer_obj { struct ttm_buffer_object base; @@ -510,16 +510,16 @@ static int ttm_buffer_object_transfer(st INIT_LIST_HEAD(&fbo->base.io_reserve_lru); mutex_init(&fbo->base.wu_mutex); fbo->base.moving = NULL; - drm_vma_node_reset(&fbo->base.base.vma_node); + drm_vma_node_reset(&fbo->base.vma_node); atomic_set(&fbo->base.cpu_writers, 0); kref_init(&fbo->base.list_kref); kref_init(&fbo->base.kref); fbo->base.destroy = &ttm_transfered_destroy; fbo->base.acc_size = 0; - fbo->base.base.resv = &fbo->base.base._resv; - dma_resv_init(fbo->base.base.resv); - ret = dma_resv_trylock(fbo->base.base.resv); + fbo->base.resv = &fbo->base.ttm_resv; + reservation_object_init(fbo->base.resv); + ret = reservation_object_trylock(fbo->base.resv); WARN_ON(!ret); *new_obj = &fbo->base; @@ -689,7 +689,7 @@ int ttm_bo_move_accel_cleanup(struct ttm int ret; struct ttm_buffer_object *ghost_obj; - dma_resv_add_excl_fence(bo->base.resv, fence); + reservation_object_add_excl_fence(bo->resv, fence); if (evict) { ret = ttm_bo_wait(bo, false, false); if (ret) @@ -716,7 +716,7 @@ int ttm_bo_move_accel_cleanup(struct ttm if (ret) return ret; - 
dma_resv_add_excl_fence(ghost_obj->base.resv, fence); + reservation_object_add_excl_fence(ghost_obj->resv, fence); /** * If we're not moving to fixed memory, the TTM object @@ -752,7 +752,7 @@ int ttm_bo_pipeline_move(struct ttm_buff int ret; - dma_resv_add_excl_fence(bo->base.resv, fence); + reservation_object_add_excl_fence(bo->resv, fence); if (!evict) { struct ttm_buffer_object *ghost_obj; @@ -772,7 +772,7 @@ int ttm_bo_pipeline_move(struct ttm_buff if (ret) return ret; - dma_resv_add_excl_fence(ghost_obj->base.resv, fence); + reservation_object_add_excl_fence(ghost_obj->resv, fence); /** * If we're not moving to fixed memory, the TTM object @@ -841,7 +841,7 @@ int ttm_bo_pipeline_gutting(struct ttm_b if (ret) return ret; - ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv); + ret = reservation_object_copy_fences(ghost->resv, bo->resv); /* Last resort, wait for the BO to be idle when we are OOM */ if (ret) ttm_bo_wait(bo, false, false); diff -rupN a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c 2019-09-23 21:45:36.543567991 +0200 +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c 2019-09-23 21:44:38.827735797 +0200 @@ -71,7 +71,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(s ttm_bo_get(bo); up_read(&vmf->vma->vm_mm->mmap_sem); (void) dma_fence_wait(bo->moving, true); - dma_resv_unlock(bo->base.resv); + reservation_object_unlock(bo->resv); ttm_bo_put(bo); goto out_unlock; } @@ -131,7 +131,7 @@ static vm_fault_t ttm_bo_vm_fault(struct * for reserve, and if it fails, retry the fault after waiting * for the buffer to become unreserved. */ - if (unlikely(!dma_resv_trylock(bo->base.resv))) { + if (unlikely(!reservation_object_trylock(bo->resv))) { if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { ttm_bo_get(bo); @@ -211,9 +211,9 @@ static vm_fault_t ttm_bo_vm_fault(struct } page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + - vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node); + vma->vm_pgoff - drm_vma_node_start(&bo->vma_node); page_last = vma_pages(vma) + vma->vm_pgoff - - drm_vma_node_start(&bo->base.vma_node); + drm_vma_node_start(&bo->vma_node); if (unlikely(page_offset >= bo->num_pages)) { ret = VM_FAULT_SIGBUS; @@ -267,7 +267,7 @@ static vm_fault_t ttm_bo_vm_fault(struct } else if (unlikely(!page)) { break; } - page->index = drm_vma_node_start(&bo->base.vma_node) + + page->index = drm_vma_node_start(&bo->vma_node) + page_offset; pfn = page_to_pfn(page); } @@ -296,7 +296,7 @@ static vm_fault_t ttm_bo_vm_fault(struct out_io_unlock: ttm_mem_io_unlock(man); out_unlock: - dma_resv_unlock(bo->base.resv); + reservation_object_unlock(bo->resv); return ret; } @@ -413,8 +413,7 @@ static struct ttm_buffer_object *ttm_bo_ node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages); if (likely(node)) { - bo = container_of(node, struct ttm_buffer_object, - base.vma_node); + bo = container_of(node, struct ttm_buffer_object, vma_node); bo = ttm_bo_get_unless_zero(bo); } diff -rupN a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c 2019-09-23 21:45:36.543567991 +0200 +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c 2019-09-23 21:44:38.827735797 +0200 @@ -39,7 +39,7 @@ static void ttm_eu_backoff_reservation_r list_for_each_entry_continue_reverse(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; - dma_resv_unlock(bo->base.resv); + reservation_object_unlock(bo->resv); } } @@ -71,7 +71,7 @@ void 
ttm_eu_backoff_reservation(struct w if (list_empty(&bo->lru)) ttm_bo_add_to_lru(bo); - dma_resv_unlock(bo->base.resv); + reservation_object_unlock(bo->resv); } spin_unlock(&glob->lru_lock); @@ -114,7 +114,7 @@ int ttm_eu_reserve_buffers(struct ww_acq ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) { - dma_resv_unlock(bo->base.resv); + reservation_object_unlock(bo->resv); ret = -EBUSY; @@ -130,7 +130,7 @@ int ttm_eu_reserve_buffers(struct ww_acq if (!entry->num_shared) continue; - ret = dma_resv_reserve_shared(bo->base.resv, + ret = reservation_object_reserve_shared(bo->resv, entry->num_shared); if (!ret) continue; @@ -144,16 +144,16 @@ int ttm_eu_reserve_buffers(struct ww_acq if (ret == -EDEADLK) { if (intr) { - ret = dma_resv_lock_slow_interruptible(bo->base.resv, - ticket); + ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, + ticket); } else { - dma_resv_lock_slow(bo->base.resv, ticket); + ww_mutex_lock_slow(&bo->resv->lock, ticket); ret = 0; } } if (!ret && entry->num_shared) - ret = dma_resv_reserve_shared(bo->base.resv, + ret = reservation_object_reserve_shared(bo->resv, entry->num_shared); if (unlikely(ret != 0)) { @@ -201,14 +201,14 @@ void ttm_eu_fence_buffer_objects(struct list_for_each_entry(entry, list, head) { bo = entry->bo; if (entry->num_shared) - dma_resv_add_shared_fence(bo->base.resv, fence); + reservation_object_add_shared_fence(bo->resv, fence); else - dma_resv_add_excl_fence(bo->base.resv, fence); + reservation_object_add_excl_fence(bo->resv, fence); if (list_empty(&bo->lru)) ttm_bo_add_to_lru(bo); else ttm_bo_move_to_lru_tail(bo, NULL); - dma_resv_unlock(bo->base.resv); + reservation_object_unlock(bo->resv); } spin_unlock(&glob->lru_lock); if (ticket) diff -rupN a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c --- a/drivers/gpu/drm/ttm/ttm_tt.c 2019-09-23 21:45:36.547567979 +0200 +++ b/drivers/gpu/drm/ttm/ttm_tt.c 2019-09-23 21:44:38.827735797 +0200 @@ -48,7 +48,7 @@ int ttm_tt_create(struct ttm_buffer_obje struct ttm_bo_device *bdev = bo->bdev; uint32_t page_flags = 0; - dma_resv_assert_held(bo->base.resv); + reservation_object_assert_held(bo->resv); if (bdev->need_dma32) page_flags |= TTM_PAGE_FLAG_DMA32; diff -rupN a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h --- a/include/drm/drm_agpsupport.h 2019-09-23 21:45:40.279556645 +0200 +++ b/include/drm/drm_agpsupport.h 2019-09-23 21:44:38.827735797 +0200 @@ -31,6 +31,11 @@ struct drm_agp_head { void drm_free_agp(struct agp_memory * handle, int pages); int drm_bind_agp(struct agp_memory * handle, unsigned int start); int drm_unbind_agp(struct agp_memory * handle); +struct agp_memory *drm_agp_bind_pages(struct drm_device *dev, + struct page **pages, + unsigned long num_pages, + uint32_t gtt_offset, + uint32_t type); struct drm_agp_head *drm_agp_init(struct drm_device *dev); void drm_legacy_agp_clear(struct drm_device *dev); @@ -75,6 +80,15 @@ static inline int drm_unbind_agp(struct return -ENODEV; } +static inline struct agp_memory *drm_agp_bind_pages(struct drm_device *dev, + struct page **pages, + unsigned long num_pages, + uint32_t gtt_offset, + uint32_t type) +{ + return NULL; +} + static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev) { return NULL; diff -rupN a/include/drm/drm_connector.h b/include/drm/drm_connector.h --- a/include/drm/drm_connector.h 2019-09-23 21:45:40.279556645 +0200 +++ b/include/drm/drm_connector.h 2019-09-23 21:44:38.827735797 +0200 @@ -41,7 +41,6 @@ struct drm_property; 
struct drm_property_blob; struct drm_printer; struct edid; -struct i2c_adapter; enum drm_connector_force { DRM_FORCE_UNSPECIFIED, @@ -324,8 +323,6 @@ enum drm_panel_orientation { * edge of the pixel clock * @DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE: Sync signals are sampled on the falling * edge of the pixel clock - * @DRM_BUS_FLAG_SHARP_SIGNALS: Set if the Sharp-specific signals - * (SPL, CLS, PS, REV) must be used */ enum drm_bus_flags { DRM_BUS_FLAG_DE_LOW = BIT(0), @@ -344,7 +341,6 @@ enum drm_bus_flags { DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE = DRM_BUS_FLAG_SYNC_NEGEDGE, DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE = DRM_BUS_FLAG_SYNC_NEGEDGE, DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE = DRM_BUS_FLAG_SYNC_POSEDGE, - DRM_BUS_FLAG_SHARP_SIGNALS = BIT(8), }; /** @@ -543,8 +539,8 @@ struct drm_connector_state { * * This is also used in the atomic helpers to map encoders to their * current and previous connectors, see - * drm_atomic_get_old_connector_for_encoder() and - * drm_atomic_get_new_connector_for_encoder(). + * &drm_atomic_get_old_connector_for_encoder() and + * &drm_atomic_get_new_connector_for_encoder(). * * NOTE: Atomic drivers must fill this out (either themselves or through * helpers), for otherwise the GETCONNECTOR and GETENCODER IOCTLs will @@ -603,12 +599,6 @@ struct drm_connector_state { unsigned int content_type; /** - * @hdcp_content_type: Connector property to pass the type of - * protected content. This is most commonly used for HDCP. - */ - unsigned int hdcp_content_type; - - /** * @scaling_mode: Connector property to control the * upscaling, mostly used for built-in panels. */ @@ -1318,18 +1308,6 @@ struct drm_connector { * [0]: progressive, [1]: interlaced */ int audio_latency[2]; - - /** - * @ddc: associated ddc adapter. - * A connector usually has its associated ddc adapter. If a driver uses - * this field, then an appropriate symbolic link is created in connector - * sysfs directory to make it easy for the user to tell which i2c - * adapter is for a particular display. - * - * The field should be set by calling drm_connector_init_with_ddc(). - */ - struct i2c_adapter *ddc; - /** * @null_edid_counter: track sinks that give us all zeros for the EDID. * Needed to workaround some HW bugs where we get all 0s @@ -1418,11 +1396,6 @@ int drm_connector_init(struct drm_device struct drm_connector *connector, const struct drm_connector_funcs *funcs, int connector_type); -int drm_connector_init_with_ddc(struct drm_device *dev, - struct drm_connector *connector, - const struct drm_connector_funcs *funcs, - int connector_type, - struct i2c_adapter *ddc); void drm_connector_attach_edid_property(struct drm_connector *connector); int drm_connector_register(struct drm_connector *connector); void drm_connector_unregister(struct drm_connector *connector); @@ -1508,7 +1481,6 @@ const char *drm_get_dvi_i_select_name(in const char *drm_get_tv_subconnector_name(int val); const char *drm_get_tv_select_name(int val); const char *drm_get_content_protection_name(int val); -const char *drm_get_hdcp_content_type_name(int val); int drm_mode_create_dvi_i_properties(struct drm_device *dev); int drm_mode_create_tv_margin_properties(struct drm_device *dev); diff -rupN a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h --- a/include/drm/drm_crtc.h 2019-09-23 21:45:40.279556645 +0200 +++ b/include/drm/drm_crtc.h 2019-09-23 21:44:38.827735797 +0200 @@ -756,9 +756,6 @@ struct drm_crtc_funcs { * provided from the configured source. Drivers must accept an "auto" * source name that will select a default source for this CRTC. 
* - * This may trigger an atomic modeset commit if necessary, to enable CRC - * generation. - * * Note that "auto" can depend upon the current modeset configuration, * e.g. it could pick an encoder or output specific CRC sampling point. * @@ -770,7 +767,6 @@ struct drm_crtc_funcs { * 0 on success or a negative error code on failure. */ int (*set_crc_source)(struct drm_crtc *crtc, const char *source); - /** * @verify_crc_source: * diff -rupN a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h --- a/include/drm/drm_dp_helper.h 2019-09-23 21:45:40.279556645 +0200 +++ b/include/drm/drm_dp_helper.h 2019-09-23 21:44:38.827735797 +0200 @@ -1309,10 +1309,6 @@ struct drm_dp_aux { * @cec: struct containing fields used for CEC-Tunneling-over-AUX. */ struct drm_dp_aux_cec cec; - /** - * @is_remote: Is this AUX CH actually using sideband messaging. - */ - bool is_remote; }; ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, diff -rupN a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h --- a/include/drm/drm_dp_mst_helper.h 2019-09-23 21:45:40.279556645 +0200 +++ b/include/drm/drm_dp_mst_helper.h 2019-09-23 21:44:38.827735797 +0200 @@ -643,17 +643,6 @@ void drm_dp_mst_dump_topology(struct seq void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); int __must_check drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr); - -ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux, - unsigned int offset, void *buffer, size_t size); -ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux, - unsigned int offset, void *buffer, size_t size); - -int drm_dp_mst_connector_late_register(struct drm_connector *connector, - struct drm_dp_mst_port *port); -void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, - struct drm_dp_mst_port *port); - struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr); int __must_check diff -rupN a/include/drm/drm_drv.h b/include/drm/drm_drv.h --- a/include/drm/drm_drv.h 2019-09-23 21:45:40.279556645 +0200 +++ b/include/drm/drm_drv.h 2019-09-23 21:44:38.827735797 +0200 @@ -62,6 +62,12 @@ enum drm_driver_feature { */ DRIVER_MODESET = BIT(1), /** + * @DRIVER_PRIME: + * + * Driver implements DRM PRIME buffer sharing. + */ + DRIVER_PRIME = BIT(2), + /** * @DRIVER_RENDER: * * Driver supports dedicated render nodes. See also the :ref:`section on @@ -496,25 +502,21 @@ struct drm_driver { * @gem_free_object: deconstructor for drm_gem_objects * * This is deprecated and should not be used by new drivers. Use - * &drm_gem_object_funcs.free instead. + * @gem_free_object_unlocked instead. */ void (*gem_free_object) (struct drm_gem_object *obj); /** * @gem_free_object_unlocked: deconstructor for drm_gem_objects * - * This is deprecated and should not be used by new drivers. Use - * &drm_gem_object_funcs.free instead. - * Compared to @gem_free_object this is not encumbered with - * &drm_device.struct_mutex legacy locking schemes. + * This is for drivers which are not encumbered with &drm_device.struct_mutex + * legacy locking schemes. Use this hook instead of @gem_free_object. */ void (*gem_free_object_unlocked) (struct drm_gem_object *obj); /** * @gem_open_object: * - * This callback is deprecated in favour of &drm_gem_object_funcs.open. 
- * * Driver hook called upon gem handle creation */ int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); @@ -522,8 +524,6 @@ struct drm_driver { /** * @gem_close_object: * - * This callback is deprecated in favour of &drm_gem_object_funcs.close. - * * Driver hook called upon gem handle release */ void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); @@ -531,9 +531,6 @@ struct drm_driver { /** * @gem_print_info: * - * This callback is deprecated in favour of - * &drm_gem_object_funcs.print_info. - * * If driver subclasses struct &drm_gem_object, it can implement this * optional hook for printing additional driver specific info. * @@ -548,108 +545,56 @@ struct drm_driver { /** * @gem_create_object: constructor for gem objects * - * Hook for allocating the GEM object struct, for use by the CMA and - * SHMEM GEM helpers. + * Hook for allocating the GEM object struct, for use by core + * helpers. */ struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, size_t size); + + /* prime: */ /** * @prime_handle_to_fd: * - * Main PRIME export function. Should be implemented with - * drm_gem_prime_handle_to_fd() for GEM based drivers. - * - * For an in-depth discussion see :ref:`PRIME buffer sharing - * documentation `. + * export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); /** * @prime_fd_to_handle: * - * Main PRIME import function. Should be implemented with - * drm_gem_prime_fd_to_handle() for GEM based drivers. - * - * For an in-depth discussion see :ref:`PRIME buffer sharing - * documentation `. + * import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */ int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle); /** * @gem_prime_export: * - * Export hook for GEM drivers. Deprecated in favour of - * &drm_gem_object_funcs.export. + * export GEM -> dmabuf + * + * This defaults to drm_gem_prime_export() if not set. */ - struct dma_buf * (*gem_prime_export)(struct drm_gem_object *obj, - int flags); + struct dma_buf * (*gem_prime_export)(struct drm_device *dev, + struct drm_gem_object *obj, int flags); /** * @gem_prime_import: * - * Import hook for GEM drivers. + * import dmabuf -> GEM * * This defaults to drm_gem_prime_import() if not set. */ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, struct dma_buf *dma_buf); - - /** - * @gem_prime_pin: - * - * Deprecated hook in favour of &drm_gem_object_funcs.pin. - */ int (*gem_prime_pin)(struct drm_gem_object *obj); - - /** - * @gem_prime_unpin: - * - * Deprecated hook in favour of &drm_gem_object_funcs.unpin. - */ void (*gem_prime_unpin)(struct drm_gem_object *obj); - - - /** - * @gem_prime_get_sg_table: - * - * Deprecated hook in favour of &drm_gem_object_funcs.get_sg_table. - */ + struct reservation_object * (*gem_prime_res_obj)( + struct drm_gem_object *obj); struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj); - - /** - * @gem_prime_import_sg_table: - * - * Optional hook used by the PRIME helper functions - * drm_gem_prime_import() respectively drm_gem_prime_import_dev(). - */ struct drm_gem_object *(*gem_prime_import_sg_table)( struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); - /** - * @gem_prime_vmap: - * - * Deprecated vmap hook for GEM drivers. Please use - * &drm_gem_object_funcs.vmap instead. 
- */ void *(*gem_prime_vmap)(struct drm_gem_object *obj); - - /** - * @gem_prime_vunmap: - * - * Deprecated vunmap hook for GEM drivers. Please use - * &drm_gem_object_funcs.vunmap instead. - */ void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr); - - /** - * @gem_prime_mmap: - * - * mmap hook for GEM drivers, used to implement dma-buf mmap in the - * PRIME helpers. - * - * FIXME: There's way too much duplication going on here, and also moved - * to &drm_gem_object_funcs. - */ int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); @@ -717,9 +662,6 @@ struct drm_driver { /** * @gem_vm_ops: Driver private ops for this object - * - * For GEM drivers this is deprecated in favour of - * &drm_gem_object_funcs.vm_ops. */ const struct vm_operations_struct *gem_vm_ops; diff -rupN a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h --- a/include/drm/drm_gem_framebuffer_helper.h 2019-09-23 21:45:40.279556645 +0200 +++ b/include/drm/drm_gem_framebuffer_helper.h 2019-09-23 21:44:38.831735786 +0200 @@ -33,4 +33,11 @@ int drm_gem_fb_prepare_fb(struct drm_pla struct drm_plane_state *state); int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state); + +struct drm_framebuffer * +drm_gem_fbdev_fb_create(struct drm_device *dev, + struct drm_fb_helper_surface_size *sizes, + unsigned int pitch_align, struct drm_gem_object *obj, + const struct drm_framebuffer_funcs *funcs); + #endif diff -rupN a/include/drm/drm_gem.h b/include/drm/drm_gem.h --- a/include/drm/drm_gem.h 2019-09-23 21:45:40.279556645 +0200 +++ b/include/drm/drm_gem.h 2019-09-23 21:44:38.831735786 +0200 @@ -35,7 +35,7 @@ */ #include -#include +#include #include @@ -101,7 +101,7 @@ struct drm_gem_object_funcs { /** * @pin: * - * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper. + * Pin backing buffer in memory. * * This callback is optional. */ @@ -110,7 +110,7 @@ struct drm_gem_object_funcs { /** * @unpin: * - * Unpin backing buffer. Used by the drm_gem_map_detach() helper. + * Unpin backing buffer. * * This callback is optional. */ @@ -120,21 +120,16 @@ struct drm_gem_object_funcs { * @get_sg_table: * * Returns a Scatter-Gather table representation of the buffer. - * Used when exporting a buffer by the drm_gem_map_dma_buf() helper. - * Releasing is done by calling dma_unmap_sg_attrs() and sg_free_table() - * in drm_gem_unmap_buf(), therefore these helpers and this callback - * here cannot be used for sg tables pointing at driver private memory - * ranges. + * Used when exporting a buffer. * - * See also drm_prime_pages_to_sg(). + * This callback is mandatory if buffer export is supported. */ struct sg_table *(*get_sg_table)(struct drm_gem_object *obj); /** * @vmap: * - * Returns a virtual address for the buffer. Used by the - * drm_gem_dmabuf_vmap() helper. + * Returns a virtual address for the buffer. * * This callback is optional. */ @@ -143,8 +138,7 @@ struct drm_gem_object_funcs { /** * @vunmap: * - * Releases the the address previously returned by @vmap. Used by the - * drm_gem_dmabuf_vunmap() helper. + * Releases the the address previously returned by @vmap. * * This callback is optional. */ @@ -276,7 +270,7 @@ struct drm_gem_object { * * Normally (@resv == &@_resv) except for imported GEM objects. */ - struct dma_resv *resv; + struct reservation_object *resv; /** * @_resv: @@ -285,7 +279,7 @@ struct drm_gem_object { * * This is unused for imported GEM objects. 
*/ - struct dma_resv _resv; + struct reservation_object _resv; /** * @funcs: @@ -390,7 +384,7 @@ void drm_gem_put_pages(struct drm_gem_ob int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, int count, struct drm_gem_object ***objs_out); struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); -long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, +long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, bool wait_all, unsigned long timeout); int drm_gem_lock_reservations(struct drm_gem_object **objs, int count, struct ww_acquire_ctx *acquire_ctx); diff -rupN a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h --- a/include/drm/drm_gem_shmem_helper.h 2019-09-23 21:45:40.279556645 +0200 +++ b/include/drm/drm_gem_shmem_helper.h 2019-09-23 21:44:38.831735786 +0200 @@ -44,9 +44,6 @@ struct drm_gem_shmem_object { */ unsigned int pages_use_count; - int madv; - struct list_head madv_list; - /** * @pages_mark_dirty_on_put: * @@ -124,18 +121,6 @@ void drm_gem_shmem_unpin(struct drm_gem_ void *drm_gem_shmem_vmap(struct drm_gem_object *obj); void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr); -int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv); - -static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem) -{ - return (shmem->madv > 0) && - !shmem->vmap_use_count && shmem->sgt && - !shmem->base.dma_buf && !shmem->base.import_attach; -} - -void drm_gem_shmem_purge_locked(struct drm_gem_object *obj); -bool drm_gem_shmem_purge(struct drm_gem_object *obj); - struct drm_gem_shmem_object * drm_gem_shmem_create_with_handle(struct drm_file *file_priv, struct drm_device *dev, size_t size, diff -rupN a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h --- a/include/drm/drm_gem_vram_helper.h 2019-09-23 21:45:40.279556645 +0200 +++ b/include/drm/drm_gem_vram_helper.h 2019-09-23 21:44:38.831735786 +0200 @@ -36,6 +36,7 @@ struct vm_area_struct; * video memory becomes scarce. */ struct drm_gem_vram_object { + struct drm_gem_object gem; struct ttm_buffer_object bo; struct ttm_bo_kmap_obj kmap; @@ -67,7 +68,7 @@ static inline struct drm_gem_vram_object static inline struct drm_gem_vram_object *drm_gem_vram_of_gem( struct drm_gem_object *gem) { - return container_of(gem, struct drm_gem_vram_object, bo.base); + return container_of(gem, struct drm_gem_vram_object, gem); } struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, @@ -107,6 +108,7 @@ extern const struct drm_vram_mm_funcs dr * Helpers for struct drm_driver */ +void drm_gem_vram_driver_gem_free_object_unlocked(struct drm_gem_object *gem); int drm_gem_vram_driver_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); @@ -122,8 +124,30 @@ int drm_gem_vram_driver_dumb_mmap_offset * &struct drm_driver with default functions. 
*/ #define DRM_GEM_VRAM_DRIVER \ + .gem_free_object_unlocked = \ + drm_gem_vram_driver_gem_free_object_unlocked, \ .dumb_create = drm_gem_vram_driver_dumb_create, \ - .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset, \ - .gem_prime_mmap = drm_gem_prime_mmap + .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset + +/* + * PRIME helpers for struct drm_driver + */ + +int drm_gem_vram_driver_gem_prime_pin(struct drm_gem_object *obj); +void drm_gem_vram_driver_gem_prime_unpin(struct drm_gem_object *obj); +void *drm_gem_vram_driver_gem_prime_vmap(struct drm_gem_object *obj); +void drm_gem_vram_driver_gem_prime_vunmap(struct drm_gem_object *obj, + void *vaddr); +int drm_gem_vram_driver_gem_prime_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma); + +#define DRM_GEM_VRAM_DRIVER_PRIME \ + .gem_prime_export = drm_gem_prime_export, \ + .gem_prime_import = drm_gem_prime_import, \ + .gem_prime_pin = drm_gem_vram_driver_gem_prime_pin, \ + .gem_prime_unpin = drm_gem_vram_driver_gem_prime_unpin, \ + .gem_prime_vmap = drm_gem_vram_driver_gem_prime_vmap, \ + .gem_prime_vunmap = drm_gem_vram_driver_gem_prime_vunmap, \ + .gem_prime_mmap = drm_gem_vram_driver_gem_prime_mmap #endif diff -rupN a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h --- a/include/drm/drm_hdcp.h 2019-09-23 21:45:40.279556645 +0200 +++ b/include/drm/drm_hdcp.h 2019-09-23 21:44:38.831735786 +0200 @@ -291,12 +291,5 @@ struct drm_connector; bool drm_hdcp_check_ksvs_revoked(struct drm_device *dev, u8 *ksvs, u32 ksv_count); int drm_connector_attach_content_protection_property( - struct drm_connector *connector, bool hdcp_content_type); -void drm_hdcp_update_content_protection(struct drm_connector *connector, - u64 val); - -/* Content Type classification for HDCP2.2 vs others */ -#define DRM_MODE_HDCP_CONTENT_TYPE0 0 -#define DRM_MODE_HDCP_CONTENT_TYPE1 1 - + struct drm_connector *connector); #endif diff -rupN a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h --- a/include/drm/drm_ioctl.h 2019-09-23 21:45:40.279556645 +0200 +++ b/include/drm/drm_ioctl.h 2019-09-23 21:44:38.831735786 +0200 @@ -114,9 +114,6 @@ enum drm_ioctl_flags { * Whether &drm_ioctl_desc.func should be called with the DRM BKL held * or not. Enforced as the default for all modern drivers, hence there * should never be a need to set this flag. - * - * Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the - * only legacy IOCTL which needs this. */ DRM_UNLOCKED = BIT(4), /** diff -rupN a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h --- a/include/drm/drm_mode_config.h 2019-09-23 21:45:40.283556634 +0200 +++ b/include/drm/drm_mode_config.h 2019-09-23 21:44:38.831735786 +0200 @@ -849,12 +849,6 @@ struct drm_mode_config { */ struct drm_property *content_protection_property; - /** - * @hdcp_content_type_property: DRM ENUM property for type of - * Protected Content. - */ - struct drm_property *hdcp_content_type_property; - /* dumb ioctl parameters */ uint32_t preferred_depth, prefer_shadow; diff -rupN a/include/drm/drm_panel.h b/include/drm/drm_panel.h --- a/include/drm/drm_panel.h 2019-09-23 21:45:40.283556634 +0200 +++ b/include/drm/drm_panel.h 2019-09-23 21:44:38.831735786 +0200 @@ -24,7 +24,6 @@ #ifndef __DRM_PANEL_H__ #define __DRM_PANEL_H__ -#include #include #include @@ -36,6 +35,14 @@ struct display_timing; /** * struct drm_panel_funcs - perform operations on a given panel + * @disable: disable panel (turn off back light, etc.) 
+ * @unprepare: turn off panel + * @prepare: turn on panel and perform set up + * @enable: enable panel (turn on back light, etc.) + * @get_modes: add modes to the connector that the panel is attached to and + * return the number of modes added + * @get_timings: copy display timings into the provided array and return + * the number of display timings available * * The .prepare() function is typically called before the display controller * starts to transmit video data. Panel drivers can use this to turn the panel @@ -61,107 +68,132 @@ struct display_timing; * the panel. This is the job of the .unprepare() function. */ struct drm_panel_funcs { - /** - * @prepare: - * - * Turn on panel and perform set up. - */ - int (*prepare)(struct drm_panel *panel); - - /** - * @enable: - * - * Enable panel (turn on back light, etc.). - */ - int (*enable)(struct drm_panel *panel); - - /** - * @disable: - * - * Disable panel (turn off back light, etc.). - */ int (*disable)(struct drm_panel *panel); - - /** - * @unprepare: - * - * Turn off panel. - */ int (*unprepare)(struct drm_panel *panel); - - /** - * @get_modes: - * - * Add modes to the connector that the panel is attached to and - * return the number of modes added. - */ + int (*prepare)(struct drm_panel *panel); + int (*enable)(struct drm_panel *panel); int (*get_modes)(struct drm_panel *panel); - - /** - * @get_timings: - * - * Copy display timings into the provided array and return - * the number of display timings available. - */ int (*get_timings)(struct drm_panel *panel, unsigned int num_timings, struct display_timing *timings); }; /** * struct drm_panel - DRM panel object + * @drm: DRM device owning the panel + * @connector: DRM connector that the panel is attached to + * @dev: parent device of the panel + * @link: link from panel device (supplier) to DRM device (consumer) + * @funcs: operations that can be performed on the panel + * @list: panel entry in registry */ struct drm_panel { - /** - * @drm: - * - * DRM device owning the panel. - */ struct drm_device *drm; - - /** - * @connector: - * - * DRM connector that the panel is attached to. - */ struct drm_connector *connector; - - /** - * @dev: - * - * Parent device of the panel. - */ struct device *dev; - /** - * @funcs: - * - * Operations that can be performed on the panel. - */ const struct drm_panel_funcs *funcs; - /** - * @list: - * - * Panel entry in registry. - */ struct list_head list; }; +/** + * drm_disable_unprepare - power off a panel + * @panel: DRM panel + * + * Calling this function will completely power off a panel (assert the panel's + * reset, turn off power supplies, ...). After this function has completed, it + * is usually no longer possible to communicate with the panel until another + * call to drm_panel_prepare(). + * + * Return: 0 on success or a negative error code on failure. + */ +static inline int drm_panel_unprepare(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->unprepare) + return panel->funcs->unprepare(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +/** + * drm_panel_disable - disable a panel + * @panel: DRM panel + * + * This will typically turn off the panel's backlight or disable the display + * drivers. For smart panels it should still be possible to communicate with + * the integrated circuitry via any command bus after this call. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +static inline int drm_panel_disable(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->disable) + return panel->funcs->disable(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +/** + * drm_panel_prepare - power on a panel + * @panel: DRM panel + * + * Calling this function will enable power and deassert any reset signals to + * the panel. After this has completed it is possible to communicate with any + * integrated circuitry via a command bus. + * + * Return: 0 on success or a negative error code on failure. + */ +static inline int drm_panel_prepare(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->prepare) + return panel->funcs->prepare(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +/** + * drm_panel_enable - enable a panel + * @panel: DRM panel + * + * Calling this function will cause the panel display drivers to be turned on + * and the backlight to be enabled. Content will be visible on screen after + * this call completes. + * + * Return: 0 on success or a negative error code on failure. + */ +static inline int drm_panel_enable(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->enable) + return panel->funcs->enable(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +/** + * drm_panel_get_modes - probe the available display modes of a panel + * @panel: DRM panel + * + * The modes probed from the panel are automatically added to the connector + * that the panel is attached to. + * + * Return: The number of modes available from the panel on success or a + * negative error code on failure. + */ +static inline int drm_panel_get_modes(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->get_modes) + return panel->funcs->get_modes(panel); + + return panel ? -ENOSYS : -EINVAL; +} + void drm_panel_init(struct drm_panel *panel); int drm_panel_add(struct drm_panel *panel); void drm_panel_remove(struct drm_panel *panel); int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector); -void drm_panel_detach(struct drm_panel *panel); - -int drm_panel_prepare(struct drm_panel *panel); -int drm_panel_unprepare(struct drm_panel *panel); - -int drm_panel_enable(struct drm_panel *panel); -int drm_panel_disable(struct drm_panel *panel); - -int drm_panel_get_modes(struct drm_panel *panel); +int drm_panel_detach(struct drm_panel *panel); #if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL) struct drm_panel *of_drm_find_panel(const struct device_node *np); diff -rupN a/include/drm/drm_prime.h b/include/drm/drm_prime.h --- a/include/drm/drm_prime.h 2019-09-23 21:45:40.283556634 +0200 +++ b/include/drm/drm_prime.h 2019-09-23 21:44:38.831735786 +0200 @@ -42,6 +42,7 @@ * This just contains the internal &struct dma_buf and handle caches for each * &struct drm_file used by the PRIME core code. 
*/ + struct drm_prime_file_private { /* private: */ struct mutex lock; @@ -63,18 +64,25 @@ struct drm_file; struct device; -/* core prime functions */ -struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, - struct dma_buf_export_info *exp_info); -void drm_gem_dmabuf_release(struct dma_buf *dma_buf); - -int drm_gem_prime_fd_to_handle(struct drm_device *dev, - struct drm_file *file_priv, int prime_fd, uint32_t *handle); +struct dma_buf *drm_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *obj, + int flags); int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); +int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); -/* helper functions for exporting */ +struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, + struct dma_buf *dma_buf, + struct device *attach_dev); + +int drm_gem_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, int prime_fd, uint32_t *handle); +struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, + struct dma_buf_export_info *exp_info); +void drm_gem_dmabuf_release(struct dma_buf *dma_buf); int drm_gem_map_attach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach); void drm_gem_map_detach(struct dma_buf *dma_buf, @@ -86,25 +94,12 @@ void drm_gem_unmap_dma_buf(struct dma_bu enum dma_data_direction dir); void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf); void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr); - -int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma); -struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages); -struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, - int flags); - -/* helper functions for importing */ -struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, - struct dma_buf *dma_buf, - struct device *attach_dev); -struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf); - -void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); - int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, dma_addr_t *addrs, int max_pages); +struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages); +void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); #endif /* __DRM_PRIME_H__ */ diff -rupN a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h --- a/include/drm/drm_sysfs.h 2019-09-23 21:45:40.283556634 +0200 +++ b/include/drm/drm_sysfs.h 2019-09-23 21:44:38.831735786 +0200 @@ -4,13 +4,10 @@ struct drm_device; struct device; -struct drm_connector; -struct drm_property; int drm_class_device_register(struct device *dev); void drm_class_device_unregister(struct device *dev); void drm_sysfs_hotplug_event(struct drm_device *dev); -void drm_sysfs_connector_status_event(struct drm_connector *connector, - struct drm_property *property); + #endif diff -rupN a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h --- a/include/drm/drm_vblank.h 2019-09-23 21:45:40.283556634 +0200 +++ b/include/drm/drm_vblank.h 2019-09-23 21:44:38.831735786 +0200 @@ -30,6 +30,7 @@ #include #include +#include struct drm_device; struct drm_crtc; diff -rupN a/include/drm/drm_vram_mm_helper.h b/include/drm/drm_vram_mm_helper.h --- 
a/include/drm/drm_vram_mm_helper.h 2019-09-23 21:45:40.283556634 +0200 +++ b/include/drm/drm_vram_mm_helper.h 2019-09-23 21:44:38.831735786 +0200 @@ -3,8 +3,6 @@ #ifndef DRM_VRAM_MM_HELPER_H #define DRM_VRAM_MM_HELPER_H -#include -#include #include struct drm_device; diff -rupN a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h --- a/include/drm/ttm/ttm_bo_api.h 2019-09-23 21:45:40.283556634 +0200 +++ b/include/drm/ttm/ttm_bo_api.h 2019-09-23 21:44:38.831735786 +0200 @@ -31,7 +31,6 @@ #ifndef _TTM_BO_API_H_ #define _TTM_BO_API_H_ -#include #include #include #include @@ -40,7 +39,7 @@ #include #include #include -#include +#include struct ttm_bo_global; @@ -128,7 +127,6 @@ struct ttm_tt; /** * struct ttm_buffer_object * - * @base: drm_gem_object superclass data. * @bdev: Pointer to the buffer object device structure. * @type: The bo type. * @destroy: Destruction function. If NULL, kfree is used. @@ -152,6 +150,7 @@ struct ttm_tt; * @ddestroy: List head for the delayed destroy list. * @swap: List head for swap LRU list. * @moving: Fence set when BO is moving + * @vma_node: Address space manager node. * @offset: The current GPU offset, which can have different meanings * depending on the memory type. For SYSTEM type memory, it should be 0. * @cur_placement: Hint of current placement. @@ -170,8 +169,6 @@ struct ttm_tt; */ struct ttm_buffer_object { - struct drm_gem_object base; - /** * Members constant at init. */ @@ -218,6 +215,9 @@ struct ttm_buffer_object { */ struct dma_fence *moving; + + struct drm_vma_offset_node vma_node; + unsigned priority; /** @@ -230,6 +230,8 @@ struct ttm_buffer_object { struct sg_table *sg; + struct reservation_object *resv; + struct reservation_object ttm_resv; struct mutex wu_mutex; }; @@ -273,7 +275,7 @@ struct ttm_bo_kmap_obj { struct ttm_operation_ctx { bool interruptible; bool no_wait_gpu; - struct dma_resv *resv; + struct reservation_object *resv; uint64_t bytes_moved; uint32_t flags; }; @@ -493,7 +495,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo * @page_alignment: Data alignment in pages. * @ctx: TTM operation context for memory allocation. * @acc_size: Accounted size for this object. - * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. + * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. * @destroy: Destroy function. Use NULL for kfree(). * * This function initializes a pre-allocated struct ttm_buffer_object. @@ -526,7 +528,7 @@ int ttm_bo_init_reserved(struct ttm_bo_d struct ttm_operation_ctx *ctx, size_t acc_size, struct sg_table *sg, - struct dma_resv *resv, + struct reservation_object *resv, void (*destroy) (struct ttm_buffer_object *)); /** @@ -545,7 +547,7 @@ int ttm_bo_init_reserved(struct ttm_bo_d * point to the shmem object backing a GEM object if TTM is used to back a * GEM user interface. * @acc_size: Accounted size for this object. - * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. + * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. * @destroy: Destroy function. Use NULL for kfree(). * * This function initializes a pre-allocated struct ttm_buffer_object. 
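
With the ttm_buffer_object fields reverted above to a plain resv pointer plus an embedded ttm_resv, drivers reach the reservation lock through bo->resv again. The following is a hedged sketch of reserving and unreserving a BO under a ww_acquire_ctx, mirroring what __ttm_bo_reserve() does in this patch; it is not part of the patch and the helper names are invented:

#include <linux/reservation.h>
#include <drm/ttm/ttm_bo_api.h>

/* Hypothetical helpers, for illustration only. */
static int my_bo_reserve(struct ttm_buffer_object *bo,
			 struct ww_acquire_ctx *ticket)
{
	int ret;

	ret = reservation_object_lock_interruptible(bo->resv, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;	/* match TTM's convention */

	return ret;			/* 0, -EDEADLK or -ERESTARTSYS */
}

static void my_bo_unreserve(struct ttm_buffer_object *bo)
{
	reservation_object_unlock(bo->resv);
}
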
@@ -570,7 +572,7 @@ int ttm_bo_init(struct ttm_bo_device *bd unsigned long size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, bool interrubtible, size_t acc_size, - struct sg_table *sg, struct dma_resv *resv, + struct sg_table *sg, struct reservation_object *resv, void (*destroy) (struct ttm_buffer_object *)); /** @@ -766,23 +768,4 @@ int ttm_bo_swapout(struct ttm_bo_global struct ttm_operation_ctx *ctx); void ttm_bo_swapout_all(struct ttm_bo_device *bdev); int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo); - -/** - * ttm_bo_uses_embedded_gem_object - check if the given bo uses the - * embedded drm_gem_object. - * - * Most ttm drivers are using gem too, so the embedded - * ttm_buffer_object.base will be initialized by the driver (before - * calling ttm_bo_init). It is also possible to use ttm without gem - * though (vmwgfx does that). - * - * This helper will figure whenever a given ttm bo is a gem object too - * or not. - * - * @bo: The bo to check. - */ -static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo) -{ - return bo->base.dev != NULL; -} #endif diff -rupN a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h --- a/include/drm/ttm/ttm_bo_driver.h 2019-09-23 21:45:40.283556634 +0200 +++ b/include/drm/ttm/ttm_bo_driver.h 2019-09-23 21:44:38.831735786 +0200 @@ -35,7 +35,7 @@ #include #include #include -#include +#include #include "ttm_bo_api.h" #include "ttm_memory.h" @@ -390,16 +390,6 @@ struct ttm_bo_driver { * notify driver that a BO was deleted from LRU. */ void (*del_from_lru_notify)(struct ttm_buffer_object *bo); - - /** - * Notify the driver that we're about to release a BO - * - * @bo: BO that is about to be released - * - * Gives the driver a chance to do any cleanup, including - * adding fences that may force a delayed delete - */ - void (*release_notify)(struct ttm_buffer_object *bo); }; /** @@ -664,14 +654,14 @@ static inline int __ttm_bo_reserve(struc if (WARN_ON(ticket)) return -EBUSY; - success = dma_resv_trylock(bo->base.resv); + success = reservation_object_trylock(bo->resv); return success ? 
0 : -EBUSY; } if (interruptible) - ret = dma_resv_lock_interruptible(bo->base.resv, ticket); + ret = reservation_object_lock_interruptible(bo->resv, ticket); else - ret = dma_resv_lock(bo->base.resv, ticket); + ret = reservation_object_lock(bo->resv, ticket); if (ret == -EINTR) return -ERESTARTSYS; return ret; @@ -755,10 +745,10 @@ static inline int ttm_bo_reserve_slowpat WARN_ON(!kref_read(&bo->kref)); if (interruptible) - ret = dma_resv_lock_slow_interruptible(bo->base.resv, - ticket); + ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, + ticket); else - dma_resv_lock_slow(bo->base.resv, ticket); + ww_mutex_lock_slow(&bo->resv->lock, ticket); if (likely(ret == 0)) ttm_bo_del_sub_from_lru(bo); @@ -783,7 +773,7 @@ static inline void ttm_bo_unreserve(stru else ttm_bo_move_to_lru_tail(bo, NULL); spin_unlock(&bo->bdev->glob->lru_lock); - dma_resv_unlock(bo->base.resv); + reservation_object_unlock(bo->resv); } /* diff -rupN a/include/linux/dma-buf.h b/include/linux/dma-buf.h --- a/include/linux/dma-buf.h 2019-09-23 21:45:40.339556463 +0200 +++ b/include/linux/dma-buf.h 2019-09-23 21:44:38.831735786 +0200 @@ -306,7 +306,7 @@ struct dma_buf { struct module *owner; struct list_head list_node; void *priv; - struct dma_resv *resv; + struct reservation_object *resv; /* poll support */ wait_queue_head_t poll; @@ -365,7 +365,7 @@ struct dma_buf_export_info { const struct dma_buf_ops *ops; size_t size; int flags; - struct dma_resv *resv; + struct reservation_object *resv; void *priv; }; diff -rupN a/include/linux/dma-fence.h b/include/linux/dma-fence.h --- a/include/linux/dma-fence.h 2019-09-23 21:45:40.339556463 +0200 +++ b/include/linux/dma-fence.h 2019-09-23 21:44:38.831735786 +0200 @@ -63,35 +63,15 @@ struct dma_fence_cb; * been completed, or never called at all. */ struct dma_fence { - spinlock_t *lock; + struct kref refcount; const struct dma_fence_ops *ops; - /* - * We clear the callback list on kref_put so that by the time we - * release the fence it is unused. No one should be adding to the - * cb_list that they don't themselves hold a reference for. - * - * The lifetime of the timestamp is similarly tied to both the - * rcu freelist and the cb_list. The timestamp is only set upon - * signaling while simultaneously notifying the cb_list. Ergo, we - * only use either the cb_list of timestamp. Upon destruction, - * neither are accessible, and so we can use the rcu. This means - * that the cb_list is *only* valid until the signal bit is set, - * and to read either you *must* hold a reference to the fence, - * and not just the rcu_read_lock. - * - * Listed in chronological order. - */ - union { - struct list_head cb_list; - /* @cb_list replaced by @timestamp on dma_fence_signal() */ - ktime_t timestamp; - /* @timestamp replaced by @rcu on dma_fence_release() */ - struct rcu_head rcu; - }; + struct rcu_head rcu; + struct list_head cb_list; + spinlock_t *lock; u64 context; u64 seqno; unsigned long flags; - struct kref refcount; + ktime_t timestamp; int error; }; @@ -293,7 +273,7 @@ static inline struct dma_fence *dma_fenc } /** - * dma_fence_get_rcu - get a fence from a dma_resv_list with + * dma_fence_get_rcu - get a fence from a reservation_object_list with * rcu read lock * @fence: fence to increase refcount of * @@ -317,7 +297,7 @@ static inline struct dma_fence *dma_fenc * so long as the caller is using RCU on the pointer to the fence. * * An alternative mechanism is to employ a seqlock to protect a bunch of - * fences, such as used by struct dma_resv. 
When using a seqlock, + * fences, such as used by struct reservation_object. When using a seqlock, * the seqlock must be taken before and checked after a reference to the * fence is acquired (as shown here). * diff -rupN a/include/linux/reservation.h b/include/linux/reservation.h --- a/include/linux/reservation.h 1970-01-01 01:00:00.000000000 +0100 +++ b/include/linux/reservation.h 2019-09-23 21:44:38.831735786 +0200 @@ -0,0 +1,297 @@ +/* + * Header file for reservations for dma-buf and ttm + * + * Copyright(C) 2011 Linaro Limited. All rights reserved. + * Copyright (C) 2012-2013 Canonical Ltd + * Copyright (C) 2012 Texas Instruments + * + * Authors: + * Rob Clark + * Maarten Lankhorst + * Thomas Hellstrom + * + * Based on bo.c which bears the following copyright notice, + * but is dual licensed: + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _LINUX_RESERVATION_H +#define _LINUX_RESERVATION_H + +#include +#include +#include +#include +#include + +extern struct ww_class reservation_ww_class; +extern struct lock_class_key reservation_seqcount_class; +extern const char reservation_seqcount_string[]; + +/** + * struct reservation_object_list - a list of shared fences + * @rcu: for internal use + * @shared_count: table of shared fences + * @shared_max: for growing shared fence table + * @shared: shared fence table + */ +struct reservation_object_list { + struct rcu_head rcu; + u32 shared_count, shared_max; + struct dma_fence __rcu *shared[]; +}; + +/** + * struct reservation_object - a reservation object manages fences for a buffer + * @lock: update side lock + * @seq: sequence count for managing RCU read-side synchronization + * @fence_excl: the exclusive fence, if there is one currently + * @fence: list of current shared fences + */ +struct reservation_object { + struct ww_mutex lock; + seqcount_t seq; + + struct dma_fence __rcu *fence_excl; + struct reservation_object_list __rcu *fence; +}; + +#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) +#define reservation_object_assert_held(obj) \ + lockdep_assert_held(&(obj)->lock.base) + +/** + * reservation_object_init - initialize a reservation object + * @obj: the reservation object + */ +static inline void +reservation_object_init(struct reservation_object *obj) +{ + ww_mutex_init(&obj->lock, &reservation_ww_class); + + __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class); + RCU_INIT_POINTER(obj->fence, NULL); + RCU_INIT_POINTER(obj->fence_excl, NULL); +} + +/** + * reservation_object_fini - destroys a reservation object + * @obj: the reservation object + */ +static inline void +reservation_object_fini(struct reservation_object *obj) +{ + int i; + struct reservation_object_list *fobj; + struct dma_fence *excl; + + /* + * This object should be dead and all references must have + * been released to it, so no need to be protected with rcu. + */ + excl = rcu_dereference_protected(obj->fence_excl, 1); + if (excl) + dma_fence_put(excl); + + fobj = rcu_dereference_protected(obj->fence, 1); + if (fobj) { + for (i = 0; i < fobj->shared_count; ++i) + dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1)); + + kfree(fobj); + } + + ww_mutex_destroy(&obj->lock); +} + +/** + * reservation_object_get_list - get the reservation object's + * shared fence list, with update-side lock held + * @obj: the reservation object + * + * Returns the shared fence list. Does NOT take references to + * the fence. The obj->lock must be held. + */ +static inline struct reservation_object_list * +reservation_object_get_list(struct reservation_object *obj) +{ + return rcu_dereference_protected(obj->fence, + reservation_object_held(obj)); +} + +/** + * reservation_object_lock - lock the reservation object + * @obj: the reservation object + * @ctx: the locking context + * + * Locks the reservation object for exclusive access and modification. Note, + * that the lock is only against other writers, readers will run concurrently + * with a writer under RCU. The seqlock is used to notify readers if they + * overlap with a writer. + * + * As the reservation object may be locked by multiple parties in an + * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle + * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation + * object may be locked by itself by passing NULL as @ctx. 
+ */ +static inline int +reservation_object_lock(struct reservation_object *obj, + struct ww_acquire_ctx *ctx) +{ + return ww_mutex_lock(&obj->lock, ctx); +} + +/** + * reservation_object_lock_interruptible - lock the reservation object + * @obj: the reservation object + * @ctx: the locking context + * + * Locks the reservation object interruptible for exclusive access and + * modification. Note, that the lock is only against other writers, readers + * will run concurrently with a writer under RCU. The seqlock is used to + * notify readers if they overlap with a writer. + * + * As the reservation object may be locked by multiple parties in an + * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle + * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation + * object may be locked by itself by passing NULL as @ctx. + */ +static inline int +reservation_object_lock_interruptible(struct reservation_object *obj, + struct ww_acquire_ctx *ctx) +{ + return ww_mutex_lock_interruptible(&obj->lock, ctx); +} + + +/** + * reservation_object_trylock - trylock the reservation object + * @obj: the reservation object + * + * Tries to lock the reservation object for exclusive access and modification. + * Note, that the lock is only against other writers, readers will run + * concurrently with a writer under RCU. The seqlock is used to notify readers + * if they overlap with a writer. + * + * Also note that since no context is provided, no deadlock protection is + * possible. + * + * Returns true if the lock was acquired, false otherwise. + */ +static inline bool __must_check +reservation_object_trylock(struct reservation_object *obj) +{ + return ww_mutex_trylock(&obj->lock); +} + +/** + * reservation_object_unlock - unlock the reservation object + * @obj: the reservation object + * + * Unlocks the reservation object following exclusive access. + */ +static inline void +reservation_object_unlock(struct reservation_object *obj) +{ +#ifdef CONFIG_DEBUG_MUTEXES + /* Test shared fence slot reservation */ + if (rcu_access_pointer(obj->fence)) { + struct reservation_object_list *fence = + reservation_object_get_list(obj); + + fence->shared_max = fence->shared_count; + } +#endif + ww_mutex_unlock(&obj->lock); +} + +/** + * reservation_object_get_excl - get the reservation object's + * exclusive fence, with update-side lock held + * @obj: the reservation object + * + * Returns the exclusive fence (if any). Does NOT take a + * reference. Writers must hold obj->lock, readers may only + * hold a RCU read side lock. + * + * RETURNS + * The exclusive fence or NULL + */ +static inline struct dma_fence * +reservation_object_get_excl(struct reservation_object *obj) +{ + return rcu_dereference_protected(obj->fence_excl, + reservation_object_held(obj)); +} + +/** + * reservation_object_get_excl_rcu - get the reservation object's + * exclusive fence, without lock held. + * @obj: the reservation object + * + * If there is an exclusive fence, this atomically increments it's + * reference count and returns it. 
+ * + * RETURNS + * The exclusive fence or NULL if none + */ +static inline struct dma_fence * +reservation_object_get_excl_rcu(struct reservation_object *obj) +{ + struct dma_fence *fence; + + if (!rcu_access_pointer(obj->fence_excl)) + return NULL; + + rcu_read_lock(); + fence = dma_fence_get_rcu_safe(&obj->fence_excl); + rcu_read_unlock(); + + return fence; +} + +int reservation_object_reserve_shared(struct reservation_object *obj, + unsigned int num_fences); +void reservation_object_add_shared_fence(struct reservation_object *obj, + struct dma_fence *fence); + +void reservation_object_add_excl_fence(struct reservation_object *obj, + struct dma_fence *fence); + +int reservation_object_get_fences_rcu(struct reservation_object *obj, + struct dma_fence **pfence_excl, + unsigned *pshared_count, + struct dma_fence ***pshared); + +int reservation_object_copy_fences(struct reservation_object *dst, + struct reservation_object *src); + +long reservation_object_wait_timeout_rcu(struct reservation_object *obj, + bool wait_all, bool intr, + unsigned long timeout); + +bool reservation_object_test_signaled_rcu(struct reservation_object *obj, + bool test_all); + +#endif /* _LINUX_RESERVATION_H */
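
For reference, a minimal update-side sketch of the calling convention the reverted interfaces above expect: take the ww_mutex, make sure a shared slot has been reserved, publish the fence, drop the lock. The function and parameter names (example_attach_shared_fence() and so on) are illustrative assumptions, not part of the patch.

#include <linux/dma-fence.h>
#include <linux/reservation.h>

/* Hypothetical helper: attach one shared fence to a buffer's reservation. */
static int example_attach_shared_fence(struct reservation_object *obj,
				       struct dma_fence *fence,
				       struct ww_acquire_ctx *ctx)
{
	int ret;

	/* Update-side lock; readers keep running under RCU. */
	ret = reservation_object_lock(obj, ctx);
	if (ret)
		return ret;

	/* Pre-allocate a slot so the add below cannot fail. */
	ret = reservation_object_reserve_shared(obj, 1);
	if (!ret)
		reservation_object_add_shared_fence(obj, fence);

	reservation_object_unlock(obj);
	return ret;
}

Reserving the slot first is why reservation_object_add_shared_fence() can return void: the allocation that may fail happens in reservation_object_reserve_shared(), before the fence is published to concurrent readers.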
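
The read side never takes obj->lock, as the reservation_object_get_excl_rcu() and reservation_object_wait_timeout_rcu() comments above spell out. A hedged sketch of that path, again with hypothetical names:

#include <linux/dma-fence.h>
#include <linux/printk.h>
#include <linux/reservation.h>
#include <linux/sched.h>

/* Hypothetical helper: block until the fences tracked for a buffer signal. */
static long example_sync_to_fences(struct reservation_object *obj,
				   bool wait_all)
{
	struct dma_fence *excl;
	long ret;

	/* Reference the exclusive fence, if any, without holding obj->lock. */
	excl = reservation_object_get_excl_rcu(obj);
	if (excl) {
		pr_debug("exclusive fence %llu:%llu still tracked\n",
			 excl->context, excl->seqno);
		dma_fence_put(excl);
	}

	/*
	 * wait_all additionally waits for the shared fences; the third
	 * argument makes the wait interruptible.
	 */
	ret = reservation_object_wait_timeout_rcu(obj, wait_all, true,
						  MAX_SCHEDULE_TIMEOUT);
	return ret < 0 ? ret : 0;
}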
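
Where a caller wants a consistent snapshot of every fence instead of blocking, reservation_object_get_fences_rcu() (declared at the end of the header above) hands back referenced copies the caller must drop. A sketch under the same assumptions, with a hypothetical function name and pr_info() used purely for illustration:

#include <linux/dma-fence.h>
#include <linux/printk.h>
#include <linux/reservation.h>
#include <linux/slab.h>

/* Hypothetical helper: log every fence currently tracked by a reservation. */
static void example_dump_fences(struct reservation_object *obj)
{
	struct dma_fence *excl;
	struct dma_fence **shared;
	unsigned int shared_count, i;

	if (reservation_object_get_fences_rcu(obj, &excl, &shared_count,
					      &shared))
		return;

	if (excl) {
		pr_info("exclusive fence %llu:%llu\n",
			excl->context, excl->seqno);
		dma_fence_put(excl);
	}

	for (i = 0; i < shared_count; ++i) {
		pr_info("shared fence %llu:%llu\n",
			shared[i]->context, shared[i]->seqno);
		dma_fence_put(shared[i]);
	}

	/* The snapshot array is allocated by the call and owned by the caller. */
	kfree(shared);
}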