
@pimlie
Last active August 1, 2017 08:35
Patch to run amdgpu-pro 16.50 on a v4.9 Ubuntu mainline kernel
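For context (not part of the patch itself): most of the hunks below adapt the 16.50 DKMS sources to DRM API changes that landed in the 4.8 and 4.9 kernels. The crtc .gamma_set hook now returns int and no longer takes a start offset, the per-crtc drm_crtc_vblank_get()/drm_crtc_vblank_put()/drm_crtc_send_vblank_event() helpers replace the (dev, pipe) variants, get_user_pages() takes a gup_flags bitmask instead of separate write/force booleans, connector->max_tmds_clock moved into display_info, and drm_atomic_helper_swap_state() now takes (state, stall). As a rough sketch of the first of these, here is the shape of a .gamma_set callback a v4.9 kernel expects; the example_* names are made up for illustration, only the prototype matches what the patched dce_v*_crtc_gamma_set() functions implement.

/* Illustrative sketch only, not from the patch: a minimal .gamma_set
 * implementation with the post-4.8 prototype (returns int, no start
 * offset).  The example_* identifiers are hypothetical. */
#include <drm/drm_crtc.h>

static int example_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
				  u16 *green, u16 *blue, uint32_t size)
{
	uint32_t i;

	/* userspace hands over a full LUT of 'size' entries */
	for (i = 0; i < size; i++) {
		/* program hardware LUT entry i from red[i], green[i], blue[i] */
	}

	return 0;
}

static const struct drm_crtc_funcs example_crtc_funcs = {
	.gamma_set = example_crtc_gamma_set,
	/* .destroy, .set_config, .page_flip, ... as before */
};
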
diff --git a/amd/amdgpu/amdgpu_acp.c b/amd/amdgpu/amdgpu_acp.c
index 252edba..892d60f 100644
--- a/amd/amdgpu/amdgpu_acp.c
+++ b/amd/amdgpu/amdgpu_acp.c
@@ -421,29 +421,6 @@ static int acp_suspend(void *handle)
static int acp_resume(void *handle)
{
- int i, ret;
- struct acp_pm_domain *apd;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- /* return early if no ACP */
- if (!adev->acp.acp_genpd)
- return 0;
-
- /* SMU block will power on ACP irrespective of ACP runtime status.
- * Power off explicitly based on genpd ACP runtime status so that ACP
- * hw and ACP-genpd status are in sync.
- * 'suspend_power_off' represents "Power status before system suspend"
- */
- if (adev->acp.acp_genpd->gpd.suspend_power_off == true) {
- apd = container_of(&adev->acp.acp_genpd->gpd,
- struct acp_pm_domain, gpd);
-
- for (i = 4; i >= 0 ; i--) {
- ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
- if (ret)
- pr_err("ACP tile %d tile suspend failed\n", i);
- }
- }
return 0;
}
diff --git a/amd/amdgpu/amdgpu_connectors.c b/amd/amdgpu/amdgpu_connectors.c
index 818d589..cb78d1c 100644
--- a/amd/amdgpu/amdgpu_connectors.c
+++ b/amd/amdgpu/amdgpu_connectors.c
@@ -168,12 +168,12 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
}
/* Any defined maximum tmds clock limit we must not exceed? */
- if (connector->max_tmds_clock > 0) {
+ if (connector->display_info.max_tmds_clock > 0) {
/* mode_clock is clock in kHz for mode to be modeset on this connector */
mode_clock = amdgpu_connector->pixelclock_for_modeset;
/* Maximum allowable input clock in kHz */
- max_tmds_clock = connector->max_tmds_clock * 1000;
+ max_tmds_clock = connector->display_info.max_tmds_clock * 1000;
DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
connector->name, mode_clock, max_tmds_clock);
diff --git a/amd/amdgpu/amdgpu_display.c b/amd/amdgpu/amdgpu_display.c
index 8425b1d..b613a42 100644
--- a/amd/amdgpu/amdgpu_display.c
+++ b/amd/amdgpu/amdgpu_display.c
@@ -268,7 +268,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
work->base = base;
- r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id);
+ r = drm_crtc_vblank_get(crtc);
if (r) {
DRM_ERROR("failed to get vblank before flip\n");
goto pflip_cleanup;
@@ -296,7 +296,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
return 0;
vblank_cleanup:
- drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(crtc);
pflip_cleanup:
if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
diff --git a/amd/amdgpu/amdgpu_sa.c b/amd/amdgpu/amdgpu_sa.c
index 052f745..c6e32b6 100644
--- a/amd/amdgpu/amdgpu_sa.c
+++ b/amd/amdgpu/amdgpu_sa.c
@@ -428,13 +428,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
soffset, eoffset, eoffset - soffset);
if (i->fence)
-#if defined(BUILD_AS_DKMS)
- seq_printf(m, " protected by 0x%08x on context %d",
- i->fence->seqno, i->fence->context);
-#else
seq_printf(m, " protected by 0x%08x on context %llu",
i->fence->seqno, i->fence->context);
-#endif
seq_printf(m, "\n");
}
diff --git a/amd/amdgpu/amdgpu_ttm.c b/amd/amdgpu/amdgpu_ttm.c
index 89760f8..b883330 100644
--- a/amd/amdgpu/amdgpu_ttm.c
+++ b/amd/amdgpu/amdgpu_ttm.c
@@ -251,11 +251,15 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
+ struct drm_file *file_priv;
struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
if (amdgpu_ttm_tt_get_usermm(bo->ttm))
return -EPERM;
- return drm_vma_node_verify_access(&abo->gem_base.vma_node, filp);
+
+ file_priv = filp->private_data;
+
+ return drm_vma_node_verify_access(&abo->gem_base.vma_node, file_priv);
}
static void amdgpu_move_null(struct ttm_buffer_object *bo,
diff --git a/amd/amdgpu/dce_v10_0.c b/amd/amdgpu/dce_v10_0.c
index 650d193..f8b079e 100644
--- a/amd/amdgpu/dce_v10_0.c
+++ b/amd/amdgpu/dce_v10_0.c
@@ -2627,19 +2627,21 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v10_0_crtc_load_lut(crtc);
+
+ return 0;
}
static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
@@ -3342,7 +3344,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
diff --git a/amd/amdgpu/dce_v11_0.c b/amd/amdgpu/dce_v11_0.c
index ca03d8e..cfe763f 100644
--- a/amd/amdgpu/dce_v11_0.c
+++ b/amd/amdgpu/dce_v11_0.c
@@ -2643,19 +2643,21 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v11_0_crtc_load_lut(crtc);
+
+ return 0;
}
static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
@@ -3391,7 +3393,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
diff --git a/amd/amdgpu/dce_v6_0.c b/amd/amdgpu/dce_v6_0.c
index fd3eeb0..384f8fe 100644
--- a/amd/amdgpu/dce_v6_0.c
+++ b/amd/amdgpu/dce_v6_0.c
@@ -1946,19 +1946,21 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v6_0_crtc_load_lut(crtc);
+
+ return 0;
}
static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
diff --git a/amd/amdgpu/dce_v8_0.c b/amd/amdgpu/dce_v8_0.c
index 8e4dff7..115b230 100644
--- a/amd/amdgpu/dce_v8_0.c
+++ b/amd/amdgpu/dce_v8_0.c
@@ -2478,19 +2478,21 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v8_0_crtc_load_lut(crtc);
+
+ return 0;
}
static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
@@ -3252,7 +3254,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
diff --git a/amd/amdgpu/dce_virtual.c b/amd/amdgpu/dce_virtual.c
index 0c6e873..4a93121 100644
--- a/amd/amdgpu/dce_virtual.c
+++ b/amd/amdgpu/dce_virtual.c
@@ -152,18 +152,20 @@ static void dce_virtual_bandwidth_update(struct amdgpu_device *adev)
return;
}
-static void dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
+
+ return 0;
}
static void dce_virtual_crtc_destroy(struct drm_crtc *crtc)
@@ -746,11 +748,11 @@ static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
/* wakeup usersapce */
if (works->event)
- drm_send_vblank_event(adev->ddev, crtc_id, works->event);
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
diff --git a/amd/backport/include/kcl/kcl_mm.h b/amd/backport/include/kcl/kcl_mm.h
index 2695f7a..3a0fc0f 100644
--- a/amd/backport/include/kcl/kcl_mm.h
+++ b/amd/backport/include/kcl/kcl_mm.h
@@ -8,7 +8,15 @@ static inline int kcl_get_user_pages(struct task_struct *tsk, struct mm_struct *
int write, int force, struct page **pages,
struct vm_area_struct **vmas)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+ unsigned int gup_flags = 0;
+ if (write)
+ gup_flags |= FOLL_WRITE;
+ if (force)
+ gup_flags |= FOLL_FORCE;
+
+ return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
return get_user_pages(start, nr_pages, write, force, pages, vmas);
#else
return get_user_pages(tsk, mm, start, nr_pages,
diff --git a/amd/backport/include/kcl/kcl_ttm.h b/amd/backport/include/kcl/kcl_ttm.h
index 6e5a170..75b18f2 100644
--- a/amd/backport/include/kcl/kcl_ttm.h
+++ b/amd/backport/include/kcl/kcl_ttm.h
@@ -112,11 +112,7 @@ static inline int kcl_ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait,
struct ww_acquire_ctx *ticket)
{
-#if defined(BUILD_AS_DKMS)
- return ttm_bo_reserve(bo, interruptible, no_wait, false, ticket);
-#else
return ttm_bo_reserve(bo, interruptible, no_wait, ticket);
-#endif
}
static inline void kcl_ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
@@ -148,12 +144,7 @@ static inline int kcl_ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
-#if defined(BUILD_AS_DKMS)
- return ttm_bo_move_accel_cleanup(bo, fence,
- evict, no_wait_gpu, new_mem);
-#else
return ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
-#endif
}
static inline int kcl_ttm_bo_wait(struct ttm_buffer_object *bo,
diff --git a/amd/backport/kcl_drm.c b/amd/backport/kcl_drm.c
index 27d4aaa..c60c464 100644
--- a/amd/backport/kcl_drm.c
+++ b/amd/backport/kcl_drm.c
@@ -178,7 +178,7 @@ static inline struct drm_plane_state *
_kcl_drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane)
{
- return state->plane_states[drm_plane_index(plane)];
+ return state->planes[drm_plane_index(plane)].state;
}
void
@@ -442,4 +442,4 @@ void kcl_drm_init(void)
_kcl_drm_atomic_helper_update_legacy_modeset_state = amdgpu_kcl_fp_setup(
"drm_atomic_helper_update_legacy_modeset_state",
_kcl_drm_atomic_helper_update_legacy_modeset_state_stub);
-}
\ No newline at end of file
+}
diff --git a/amd/backport/kcl_fence_array.c b/amd/backport/kcl_fence_array.c
index d7ee15c..7eacba4 100644
--- a/amd/backport/kcl_fence_array.c
+++ b/amd/backport/kcl_fence_array.c
@@ -21,7 +21,7 @@
#include <linux/export.h>
#include <linux/slab.h>
-#if defined(BUILD_AS_DKMS)
+#if defined(BUILD_AS_DKMS) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
static const char *fence_array_get_driver_name(struct fence *fence)
diff --git a/amd/dal/amdgpu_dm/amdgpu_dm.c b/amd/dal/amdgpu_dm/amdgpu_dm.c
index c5fcf5a..297b545 100644
--- a/amd/dal/amdgpu_dm/amdgpu_dm.c
+++ b/amd/dal/amdgpu_dm/amdgpu_dm.c
@@ -213,9 +213,8 @@ static void dm_pflip_high_irq(void *interrupt_params)
/* wakeup usersapce */
if(works->event)
- drm_send_vblank_event(
- adev->ddev,
- amdgpu_crtc->crtc_id,
+ drm_crtc_send_vblank_event(
+ &amdgpu_crtc->base,
works->event);
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
diff --git a/amd/dal/amdgpu_dm/amdgpu_dm_types.c b/amd/dal/amdgpu_dm/amdgpu_dm_types.c
index edc8e86..0b38016 100644
--- a/amd/dal/amdgpu_dm/amdgpu_dm_types.c
+++ b/amd/dal/amdgpu_dm/amdgpu_dm_types.c
@@ -998,12 +998,11 @@ void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
kfree(crtc);
}
-static void amdgpu_dm_atomic_crtc_gamma_set(
+static int amdgpu_dm_atomic_crtc_gamma_set(
struct drm_crtc *crtc,
u16 *red,
u16 *green,
u16 *blue,
- uint32_t start,
uint32_t size)
{
struct drm_device *dev = crtc->dev;
@@ -1012,6 +1011,8 @@ static void amdgpu_dm_atomic_crtc_gamma_set(
crtc->state->mode.private_flags |= AMDGPU_CRTC_MODE_PRIVATE_FLAGS_GAMMASET;
drm_atomic_helper_crtc_set_property(crtc, prop, 0);
+
+ return 0;
}
static int dm_crtc_funcs_atomic_set_property(
@@ -1675,6 +1676,10 @@ static int dm_plane_helper_prepare_fb(
struct drm_plane *plane,
struct drm_framebuffer *fb,
const struct drm_plane_state *new_state)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+static int dm_plane_helper_prepare_fb(
+ struct drm_plane *plane,
+ struct drm_plane_state *new_state)
#else
static int dm_plane_helper_prepare_fb(
struct drm_plane *plane,
@@ -1718,6 +1723,10 @@ static void dm_plane_helper_cleanup_fb(
struct drm_plane *plane,
struct drm_framebuffer *fb,
const struct drm_plane_state *old_state)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+static void dm_plane_helper_cleanup_fb(
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state)
#else
static void dm_plane_helper_cleanup_fb(
struct drm_plane *plane,
@@ -2516,7 +2525,7 @@ int amdgpu_dm_atomic_commit(
* the software side now.
*/
- drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_swap_state(state, true);
/*
* From this point state become old state really. New state is