diff -Naur amdgpu-19.10-785425.bak/amd/amdgpu/amdgpu_connectors.c amdgpu-19.10-785425/amd/amdgpu/amdgpu_connectors.c
--- amdgpu-19.10-785425.bak/amd/amdgpu/amdgpu_connectors.c	2019-04-17 08:07:59.000000000 +0200
+++ amdgpu-19.10-785425/amd/amdgpu/amdgpu_connectors.c	2019-06-11 13:36:22.564594363 +0200
@@ -365,11 +365,11 @@
 	int ret;
 
 	if (amdgpu_connector->edid) {
-		drm_mode_connector_update_edid_property(connector, amdgpu_connector->edid);
+		drm_connector_update_edid_property(connector, amdgpu_connector->edid);
 		ret = drm_add_edid_modes(connector, amdgpu_connector->edid);
 		return ret;
 	}
-	drm_mode_connector_update_edid_property(connector, NULL);
+	drm_connector_update_edid_property(connector, NULL);
 	return 0;
 }
 
diff -Naur amdgpu-19.10-785425.bak/amd/amdgpu/amdgpu_encoders.c amdgpu-19.10-785425/amd/amdgpu/amdgpu_encoders.c
--- amdgpu-19.10-785425.bak/amd/amdgpu/amdgpu_encoders.c	2019-04-17 08:07:59.000000000 +0200
+++ amdgpu-19.10-785425/amd/amdgpu/amdgpu_encoders.c	2019-06-11 13:36:22.564594363 +0200
@@ -47,7 +47,7 @@
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		amdgpu_encoder = to_amdgpu_encoder(encoder);
 		if (amdgpu_encoder->devices & amdgpu_connector->devices) {
-			drm_mode_connector_attach_encoder(connector, encoder);
+			drm_connector_attach_encoder(connector, encoder);
 			if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 				amdgpu_atombios_encoder_init_backlight(amdgpu_encoder, connector);
 				adev->mode_info.bl_encoder = amdgpu_encoder;
diff -Naur amdgpu-19.10-785425.bak/amd/amdgpu/amdgpu_mn.c amdgpu-19.10-785425/amd/amdgpu/amdgpu_mn.c
--- amdgpu-19.10-785425.bak/amd/amdgpu/amdgpu_mn.c	2019-04-17 08:07:59.000000000 +0200
+++ amdgpu-19.10-785425/amd/amdgpu/amdgpu_mn.c	2019-06-11 13:36:22.564594363 +0200
@@ -186,14 +186,29 @@
  *
  * @amn: our notifier
  */
-static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
-{
-	mutex_lock(&amn->read_lock);
+// static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
+// {
+//	mutex_lock(&amn->read_lock);
+//	if (atomic_inc_return(&amn->recursion) == 1)
+//		down_read_non_owner(&amn->lock);
+//	mutex_unlock(&amn->read_lock);
+// }
+
+static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
+{
+	if (blockable)
+		mutex_lock(&amn->read_lock);
+	else if (!mutex_trylock(&amn->read_lock))
+		return -EAGAIN;
+
 	if (atomic_inc_return(&amn->recursion) == 1)
 		down_read_non_owner(&amn->lock);
 	mutex_unlock(&amn->read_lock);
+
+	return 0;
 }
+
 
 /**
  * amdgpu_mn_read_unlock - drop the read side lock for this notifier
  *
@@ -247,10 +262,36 @@
  * Block for operations on BOs to finish and mark pages as accessed and
  * potentially dirty.
  */
-static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
+
+// static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
+//					struct mm_struct *mm,
+//					unsigned long start,
+//					unsigned long end)
+// {
+//	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+//	struct interval_tree_node *it;
+//
+//	/* notification is exclusive, but interval is inclusive */
+//	end -= 1;
+//
+//	amdgpu_mn_read_lock(amn);
+//
+//	it = interval_tree_iter_first(&amn->objects, start, end);
+//	while (it) {
+//		struct amdgpu_mn_node *node;
+//
+//		node = container_of(it, struct amdgpu_mn_node, it);
+//		it = interval_tree_iter_next(it, start, end);
+//
+//		amdgpu_mn_invalidate_node(node, start, end);
+//	}
+// }
+
+static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 					struct mm_struct *mm,
 					unsigned long start,
-					unsigned long end)
+					unsigned long end,
+					bool blockable)
 {
 	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
@@ -258,19 +299,31 @@
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	amdgpu_mn_read_lock(amn);
+	/* TODO we should be able to split locking for interval tree and
+	 * amdgpu_mn_invalidate_node
+	 */
+	if (amdgpu_mn_read_lock(amn, blockable))
+		return -EAGAIN;
 
 	it = interval_tree_iter_first(&amn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 
+		if (!blockable) {
+			amdgpu_mn_read_unlock(amn);
+			return -EAGAIN;
+		}
+
 		node = container_of(it, struct amdgpu_mn_node, it);
 		it = interval_tree_iter_next(it, start, end);
 
 		amdgpu_mn_invalidate_node(node, start, end);
 	}
+
+	return 0;
 }
+
 
 /**
  * amdgpu_mn_invalidate_range_end_gfx - callback to notify about mm change
  *
@@ -303,10 +356,43 @@
  * necessitates evicting all user-mode queues of the process. The BOs
  * are restorted in amdgpu_mn_invalidate_range_end_hsa.
  */
-static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
+// static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
+//					struct mm_struct *mm,
+//					unsigned long start,
+//					unsigned long end)
+// {
+//	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+//	struct interval_tree_node *it;
+//
+//	/* notification is exclusive, but interval is inclusive */
+//	end -= 1;
+//
+//	amdgpu_mn_read_lock(amn);
+//
+//	it = interval_tree_iter_first(&amn->objects, start, end);
+//	while (it) {
+//		struct amdgpu_mn_node *node;
+//		struct amdgpu_bo *bo;
+//
+//		node = container_of(it, struct amdgpu_mn_node, it);
+//		it = interval_tree_iter_next(it, start, end);
+//
+//		list_for_each_entry(bo, &node->bos, mn_list) {
+//			struct kgd_mem *mem = bo->kfd_bo;
+//
+//			if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
+//							 start, end))
+//				amdgpu_amdkfd_evict_userptr(mem, mm);
+//		}
+//	}
+// }
+
+
+static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 					struct mm_struct *mm,
 					unsigned long start,
-					unsigned long end)
+					unsigned long end,
+					bool blockable)
 {
 	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
@@ -314,13 +400,19 @@
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	amdgpu_mn_read_lock(amn);
+	if (amdgpu_mn_read_lock(amn, blockable))
+		return -EAGAIN;
 
 	it = interval_tree_iter_first(&amn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 		struct amdgpu_bo *bo;
 
+		if (!blockable) {
+			amdgpu_mn_read_unlock(amn);
+			return -EAGAIN;
+		}
+
 		node = container_of(it, struct amdgpu_mn_node, it);
 		it = interval_tree_iter_next(it, start, end);
 
@@ -332,6 +424,8 @@
 			amdgpu_amdkfd_evict_userptr(mem, mm);
 		}
 	}
+
+	return 0;
 }
 
 static void amdgpu_mn_invalidate_range_end_hsa(struct mmu_notifier *mn,
diff -Naur amdgpu-19.10-785425.bak/amd/amdgpu/amdgpu_prime.c amdgpu-19.10-785425/amd/amdgpu/amdgpu_prime.c
--- amdgpu-19.10-785425.bak/amd/amdgpu/amdgpu_prime.c	2019-04-17 08:07:59.000000000 +0200
+++ amdgpu-19.10-785425/amd/amdgpu/amdgpu_prime.c	2019-06-11 13:36:22.564594363 +0200
@@ -246,7 +246,7 @@
  * 0 on success or a negative error code on failure.
  */
 static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
-				  struct device *target_dev,
+//				  struct device *target_dev,
 				  struct dma_buf_attachment *attach)
 {
 	struct drm_gem_object *obj = dma_buf->priv;
@@ -254,7 +254,8 @@
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	long r;
 
-	r = drm_gem_map_attach(dma_buf, target_dev, attach);
+	//r = drm_gem_map_attach(dma_buf, target_dev, attach);
+	r = drm_gem_map_attach(dma_buf, attach);
 	if (r)
 		return r;
 
@@ -434,9 +435,9 @@
 	.release = drm_gem_dmabuf_release,
 	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
 	.map = drm_gem_dmabuf_kmap,
-	.map_atomic = drm_gem_dmabuf_kmap_atomic,
+	//.map_atomic = drm_gem_dmabuf_kmap_atomic,
 	.unmap = drm_gem_dmabuf_kunmap,
-	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+	//.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
 	.mmap = drm_gem_dmabuf_mmap,
 	.vmap = drm_gem_dmabuf_vmap,
 	.vunmap = drm_gem_dmabuf_vunmap,
diff -Naur amdgpu-19.10-785425.bak/amd/amdgpu/dce_virtual.c amdgpu-19.10-785425/amd/amdgpu/dce_virtual.c
--- amdgpu-19.10-785425.bak/amd/amdgpu/dce_virtual.c	2019-04-17 08:07:59.000000000 +0200
+++ amdgpu-19.10-785425/amd/amdgpu/dce_virtual.c	2019-06-11 13:36:22.560594363 +0200
@@ -658,7 +658,7 @@
 	drm_connector_register(connector);
 
 	/* link them */
-	drm_mode_connector_attach_encoder(connector, encoder);
+	drm_connector_attach_encoder(connector, encoder);
 
 	return 0;
 }
diff -Naur amdgpu-19.10-785425.bak/amd/display/amdgpu_dm/amdgpu_dm.c amdgpu-19.10-785425/amd/display/amdgpu_dm/amdgpu_dm.c
--- amdgpu-19.10-785425.bak/amd/display/amdgpu_dm/amdgpu_dm.c	2019-04-17 08:07:59.000000000 +0200
+++ amdgpu-19.10-785425/amd/display/amdgpu_dm/amdgpu_dm.c	2019-06-11 13:36:22.564594363 +0200
@@ -1270,14 +1270,14 @@
 			(struct edid *) sink->dc_edid.raw_edid;
 
 
-			drm_mode_connector_update_edid_property(connector,
+			drm_connector_update_edid_property(connector,
 					aconnector->edid);
 		}
 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
 
 	} else {
 		amdgpu_dm_update_freesync_caps(connector, NULL);
-		drm_mode_connector_update_edid_property(connector, NULL);
+		drm_connector_update_edid_property(connector, NULL);
 		aconnector->num_modes = 0;
 		dc_sink_release(aconnector->dc_sink);
 		aconnector->dc_sink = NULL;
@@ -4902,7 +4902,7 @@
 			link,
 			link_index);
 
-	drm_mode_connector_attach_encoder(
+	drm_connector_attach_encoder(
 		&aconnector->base, &aencoder->base);
 
 	drm_connector_register(&aconnector->base);
diff -Naur amdgpu-19.10-785425.bak/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c amdgpu-19.10-785425/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
--- amdgpu-19.10-785425.bak/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c	2019-04-17 08:07:59.000000000 +0200
+++ amdgpu-19.10-785425/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c	2019-06-11 13:36:22.564594363 +0200
@@ -172,7 +172,7 @@
 		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
 
 		if (!edid) {
-			drm_mode_connector_update_edid_property(
+			drm_connector_update_edid_property(
 				&aconnector->base,
 				NULL);
 			return ret;
@@ -206,7 +206,7 @@
 				connector, aconnector->edid);
 		}
 
-		drm_mode_connector_update_edid_property(
+		drm_connector_update_edid_property(
 			&aconnector->base, aconnector->edid);
 
 		ret = drm_add_edid_modes(connector, aconnector->edid);
@@ -287,7 +287,7 @@
 		DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
 			 aconnector, connector->base.id, aconnector->mst_port);
 		aconnector->port = port;
-		drm_mode_connector_set_path_property(connector, pathprop);
+		drm_connector_set_path_property(connector, pathprop);
 		drm_modeset_unlock(&dev->mode_config.connection_mutex);
 		aconnector->mst_connected = true;
 		return &aconnector->base;
@@ -322,7 +322,7 @@
 			master->connector_id);
 
 	aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
-	drm_mode_connector_attach_encoder(&aconnector->base,
+	drm_connector_attach_encoder(&aconnector->base,
 					  &aconnector->mst_encoder->base);
 
 	drm_object_attach_property(
@@ -334,7 +334,7 @@
 		dev->mode_config.tile_property,
 		0);
 
-	drm_mode_connector_set_path_property(connector, pathprop);
+	drm_connector_set_path_property(connector, pathprop);
 
 	/*
 	 * Initialize connector state before adding the connectror to drm and
--- amdgpu-19.10-785425.bak/Makefile	2019-04-17 08:07:59.000000000 +0200
+++ amdgpu-19.10-785425/Makefile	2019-06-11 23:05:03.356742357 +0200
@@ -33,6 +33,9 @@
 DRM_VER=$(shell sed -n 's/^VERSION = \(.*\)/\1/p' $(kdir)/Makefile)
 DRM_PATCH=$(shell sed -n 's/^PATCHLEVEL = \(.*\)/\1/p' $(kdir)/Makefile)
 endif
+else
+DRM_VER=$(shell make kernelversion | sed -n 's/\(.*\)\.\(.*\)\.\(.*\)/\1/p')
+DRM_PATCH=$(shell make kernelversion | sed -n 's/\(.*\)\.\(.*\)\.\(.*\)/\2/p')
 endif
 
 subdir-ccflags-y += \
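
Background for the hunks above: they adapt the amdgpu 19.10 DKMS sources to kernel APIs that changed around Linux 4.19. The drm_mode_connector_* helpers lost their "mode_" infix (drm_connector_update_edid_property() and friends), dma-buf dropped the target_dev parameter from the attach callback and removed the map_atomic/unmap_atomic hooks, and the mmu_notifier invalidate_range_start callback gained a "bool blockable" argument and an int return value so callers that must not sleep (such as the OOM reaper) can be refused with -EAGAIN. A minimal sketch of that 4.19-era callback contract, for reference only; the example_* names are illustrative and not part of this patch:

	#include <linux/mmu_notifier.h>

	/* Callback shape expected by ~4.19 kernels (sketch, not patch content). */
	static int example_invalidate_range_start(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end,
						  bool blockable)
	{
		/* Non-blocking contexts must not be made to sleep: back off. */
		if (!blockable)
			return -EAGAIN;

		/* ... take locks and invalidate the affected range ... */
		return 0;
	}

	static const struct mmu_notifier_ops example_mn_ops = {
		.invalidate_range_start = example_invalidate_range_start,
	};

The patch is meant to be applied from inside the extracted amdgpu-19.10-785425 tree with one leading path component stripped (patch -p1), so the .bak/ paths on the --- lines are only reference copies.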