Last active: February 18, 2018 17:02
Save ardemiranda/3ec1b5477d78db7b78a765bcf3437553 to your computer and use it in GitHub Desktop.
Patch to install NVIDIA driver version 375.26 on CentOS kernel 3.10.0-693.17.1.el7. Fixes the DKMS error "fatal error: linux/fence.h: No such file or directory".
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
--- nvidia-drm-fence.c 2016-12-11 02:54:42.000000000 -0200
+++ nvidia-drm-fence.c 2018-02-12 01:52:58.015107155 -0200
@@ -31,7 +31,7 @@
#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
struct nv_fence {
- struct fence base;
+ struct dma_fence base;
spinlock_t lock;
struct nvidia_drm_device *nv_dev;
@@ -51,7 +51,7 @@
static const char *nvidia_drm_gem_prime_fence_op_get_driver_name
(
- struct fence *fence
+ struct dma_fence *fence
)
{
return "NVIDIA";
@@ -59,7 +59,7 @@
static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name
(
- struct fence *fence
+ struct dma_fence *fence
)
{
return "nvidia.prime";
@@ -67,7 +67,7 @@
static bool nvidia_drm_gem_prime_fence_op_signaled
(
- struct fence *fence
+ struct dma_fence *fence
)
{
struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
@@ -99,7 +99,7 @@
static bool nvidia_drm_gem_prime_fence_op_enable_signaling
(
- struct fence *fence
+ struct dma_fence *fence
)
{
bool ret = true;
@@ -107,7 +107,7 @@
struct nvidia_drm_gem_object *nv_gem = nv_fence->nv_gem;
struct nvidia_drm_device *nv_dev = nv_fence->nv_dev;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
{
return false;
}
@@ -132,7 +132,7 @@
}
nv_gem->fenceContext.softFence = fence;
- fence_get(fence);
+ dma_fence_get(fence);
unlock_struct_mutex:
mutex_unlock(&nv_dev->dev->struct_mutex);
@@ -142,7 +142,7 @@
static void nvidia_drm_gem_prime_fence_op_release
(
- struct fence *fence
+ struct dma_fence *fence
)
{
struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
@@ -151,7 +151,7 @@
static signed long nvidia_drm_gem_prime_fence_op_wait
(
- struct fence *fence,
+ struct dma_fence *fence,
bool intr,
signed long timeout
)
@@ -166,12 +166,12 @@
* that it should never get hit during normal operation, but not so long
* that the system becomes unresponsive.
*/
- return fence_default_wait(fence, intr,
+ return dma_fence_default_wait(fence, intr,
(timeout == MAX_SCHEDULE_TIMEOUT) ?
msecs_to_jiffies(96) : timeout);
}
-static const struct fence_ops nvidia_drm_gem_prime_fence_ops = {
+static const struct dma_fence_ops nvidia_drm_gem_prime_fence_ops = {
.get_driver_name = nvidia_drm_gem_prime_fence_op_get_driver_name,
.get_timeline_name = nvidia_drm_gem_prime_fence_op_get_timeline_name,
.signaled = nvidia_drm_gem_prime_fence_op_signaled,
@@ -281,7 +281,7 @@
bool force
)
{
- struct fence *fence = nv_gem->fenceContext.softFence;
+ struct dma_fence *fence = nv_gem->fenceContext.softFence;
WARN_ON(!mutex_is_locked(&nv_dev->dev->struct_mutex));
@@ -297,10 +297,10 @@
if (force || nv_fence_ready_to_signal(nv_fence))
{
- fence_signal(&nv_fence->base);
+ dma_fence_signal(&nv_fence->base);
nv_gem->fenceContext.softFence = NULL;
- fence_put(&nv_fence->base);
+ dma_fence_put(&nv_fence->base);
nvKms->disableChannelEvent(nv_dev->pDevice,
nv_gem->fenceContext.cb);
@@ -316,7 +316,7 @@
nv_fence = container_of(fence, struct nv_fence, base);
- fence_signal(&nv_fence->base);
+ dma_fence_signal(&nv_fence->base);
}
}
@@ -509,7 +509,7 @@
* fence_context_alloc() cannot fail, so we do not need to check a return
* value.
*/
- nv_gem->fenceContext.context = fence_context_alloc(1);
+ nv_gem->fenceContext.context = dma_fence_context_alloc(1);
ret = nvidia_drm_gem_prime_fence_import_semaphore(
nv_dev, nv_gem, p->index,
@@ -666,13 +666,13 @@
nv_fence->nv_gem = nv_gem;
spin_lock_init(&nv_fence->lock);
- fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
+ dma_fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
&nv_fence->lock, nv_gem->fenceContext.context,
p->sem_thresh);
reservation_object_add_excl_fence(&nv_gem->fenceContext.resv,
&nv_fence->base);
- fence_put(&nv_fence->base); /* Reservation object has reference */
+ dma_fence_put(&nv_fence->base); /* Reservation object has reference */
ret = 0;
--- nvidia-drm-modeset.c 2016-12-11 02:54:42.000000000 -0200
+++ nvidia-drm-modeset.c 2018-02-12 01:57:15.028117605 -0200
@@ -983,7 +983,7 @@
* drm_atomic_commit().
*/
if (ret != 0) {
- drm_atomic_state_free(state);
+ __drm_atomic_state_free(state);
}
drm_modeset_unlock_all(dev);
--- nvidia-drm-priv.h 2016-12-11 02:54:42.000000000 -0200
+++ nvidia-drm-priv.h 2018-02-12 01:45:52.127089838 -0200
@@ -34,7 +34,7 @@
#endif
#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/reservation.h>
#endif
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.