@overcq
Last active June 29, 2024 22:24
Patch to make old nvidia-drivers work with the newest Linux kernel
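
The hunks below migrate the driver's kernel interface layer to current kernel APIs, mostly as one-for-one replacements: PDE_DATA() becomes pde_data(), the removed pci_dma_*/pci_map_* wrappers become the generic dma_* API, direct writes to vma->vm_flags become vm_flags_set()/vm_flags_clear(), acpi_bus_get_device() becomes acpi_get_acpi_dev(), set_memory_array_uc()/set_memory_array_wb() become set_memory_uc()/set_memory_wb(), and the old wait_on_bit_lock() variants are replaced with wait_on_bit(). The IBM POWER9 NPU (pnv_npu2_*) ATS path, which the kernel dropped, is removed outright, along with other long-dead compatibility branches.

A minimal usage sketch for applying the patch, assuming it is saved as nvidia.patch next to an unpacked driver source tree (both names are placeholders, not part of the patch):

    cd NVIDIA-Linux-x86_64-xxx.xx/
    patch -p1 < ../nvidia.patch

The diff uses a/ and b/ path prefixes, so -p1 strips one leading path component. Alternatively, the stock .run installer can repackage itself with the patch applied:

    sh NVIDIA-Linux-x86_64-xxx.xx.run --apply-patch nvidia.patch

which should emit a new *-custom.run installer containing the patched kernel interface files.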
--- a/kernel/common/inc/nv-procfs.h
+++ b/kernel/common/inc/nv-procfs.h
@@ -113,7 +113,7 @@
})
#if defined(NV_PDE_DATA_PRESENT)
-# define NV_PDE_DATA(inode) PDE_DATA(inode)
+# define NV_PDE_DATA(inode) pde_data(inode)
#else
# define NV_PDE_DATA(inode) PDE(inode)->data
#endif
--- a/kernel/common/inc/nv-time.h
+++ b/kernel/common/inc/nv-time.h
@@ -38,9 +38,6 @@
static inline void nv_gettimeofday(nv_timeval *tv)
{
-#ifdef NV_DO_GETTIMEOFDAY_PRESENT
- do_gettimeofday(tv);
-#else
struct timespec64 now;
ktime_get_real_ts64(&now);
@@ -49,7 +46,6 @@
.tv_sec = now.tv_sec,
.tv_usec = now.tv_nsec/1000,
};
-#endif // NV_DO_GETTIMEOFDAY_PRESENT
}
#endif // __NV_TIME_H__
--- a/kernel/common/inc/nv-mm.h
+++ b/kernel/common/inc/nv-mm.h
@@ -48,10 +48,10 @@
*/
#if defined(NV_GET_USER_PAGES_HAS_TASK_STRUCT)
- #if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS)
- #define NV_GET_USER_PAGES(start, nr_pages, write, force, pages, vmas) \
- get_user_pages(current, current->mm, start, nr_pages, write, force, pages, vmas)
- #else
+// #if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS)
+// #define NV_GET_USER_PAGES(start, nr_pages, write, force, pages, vmas) \
+// get_user_pages(start, nr_pages, write, pages)
+// #else
#include <linux/mm.h>
#include <linux/sched.h>
@@ -69,10 +69,10 @@
if (force)
flags |= FOLL_FORCE;
- return get_user_pages(current, current->mm, start, nr_pages, flags,
- pages, vmas);
+ return get_user_pages(start, nr_pages, flags,
+ pages);
}
- #endif
+// #endif
#else
#if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS)
#define NV_GET_USER_PAGES get_user_pages
@@ -161,8 +161,8 @@
#else
- return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
- pages, vmas);
+ return get_user_pages_remote(mm, start, nr_pages, flags,
+ pages, NULL);
#endif
@@ -191,7 +191,7 @@
if (force)
flags |= FOLL_FORCE;
- return get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
+ return get_user_pages(start, nr_pages, flags, pages);
}
#endif
#endif
--- a/kernel/common/inc/nv-linux.h
+++ b/kernel/common/inc/nv-linux.h
@@ -132,6 +132,8 @@
#include <asm/page.h> /* PAGE_OFFSET */
#include <asm/pgtable.h> /* pte bit definitions */
+#include <linux/dma-direct.h>
+
#include "nv-list-helpers.h"
/*
@@ -195,9 +197,8 @@
#include <linux/workqueue.h> /* workqueue */
#include <nv-kthread-q.h> /* kthread based queue */
-#if defined(NV_LINUX_EFI_H_PRESENT)
#include <linux/efi.h> /* efi_enabled */
-#endif
+
#if defined(NV_LINUX_SCREEN_INFO_H_PRESENT)
#include <linux/screen_info.h> /* screen_info */
#else
@@ -209,17 +210,7 @@
#include <asm-generic/pci-dma-compat.h>
#endif
-#if defined(NV_EFI_ENABLED_PRESENT) && defined(NV_EFI_ENABLED_ARGUMENT_COUNT)
-#if (NV_EFI_ENABLED_ARGUMENT_COUNT == 1)
#define NV_EFI_ENABLED() efi_enabled(EFI_BOOT)
-#else
-#error "NV_EFI_ENABLED_ARGUMENT_COUNT value unrecognized!"
-#endif
-#elif (defined(NV_EFI_ENABLED_PRESENT) || defined(efi_enabled))
-#define NV_EFI_ENABLED() efi_enabled
-#else
-#define NV_EFI_ENABLED() 0
-#endif
#if defined(CONFIG_CRAY_XT)
#include <cray/cray_nvidia.h>
@@ -337,19 +328,8 @@
#include <linux/ipmi.h>
#endif
-#if defined(NV_PCI_DMA_MAPPING_ERROR_PRESENT)
-#if (NV_PCI_DMA_MAPPING_ERROR_ARGUMENT_COUNT == 2)
-#define NV_PCI_DMA_MAPPING_ERROR(dev, addr) \
- pci_dma_mapping_error(dev, addr)
-#elif (NV_PCI_DMA_MAPPING_ERROR_ARGUMENT_COUNT == 1)
-#define NV_PCI_DMA_MAPPING_ERROR(dev, addr) \
- pci_dma_mapping_error(addr)
-#else
-#error "NV_PCI_DMA_MAPPING_ERROR_ARGUMENT_COUNT value unrecognized!"
-#endif
-#elif defined(NV_VM_INSERT_PAGE_PRESENT)
-#error "NV_PCI_DMA_MAPPING_ERROR() undefined!"
-#endif
+#define NV_DMA_MAPPING_ERROR(dev, addr) \
+ dma_mapping_error(dev, addr)
#if defined(NV_LINUX_ACPI_EVENTS_SUPPORTED)
#if (NV_ACPI_WALK_NAMESPACE_ARGUMENT_COUNT == 6)
@@ -558,18 +538,6 @@
return ptr;
}
-static inline void *nv_ioremap_nocache(NvU64 phys, NvU64 size)
-{
-#if defined(NV_IOREMAP_NOCACHE_PRESENT)
- void *ptr = ioremap_nocache(phys, size);
-#else
- void *ptr = ioremap(phys, size);
-#endif
- if (ptr)
- NV_MEMDBG_ADD(ptr, size);
- return ptr;
-}
-
static inline void *nv_ioremap_cache(NvU64 phys, NvU64 size)
{
#if defined(NV_IOREMAP_CACHE_PRESENT)
@@ -590,7 +558,7 @@
NV_MEMDBG_ADD(ptr, size);
return ptr;
#else
- return nv_ioremap_nocache(phys, size);
+ return nv_ioremap(phys, size);
#endif
}
@@ -989,15 +957,8 @@
* Early 2.6 kernels have acquire_console_sem, but from 2.6.38+ it was
* renamed to console_lock.
*/
-#if defined(NV_ACQUIRE_CONSOLE_SEM_PRESENT)
-#define NV_ACQUIRE_CONSOLE_SEM() acquire_console_sem()
-#define NV_RELEASE_CONSOLE_SEM() release_console_sem()
-#elif defined(NV_CONSOLE_LOCK_PRESENT)
#define NV_ACQUIRE_CONSOLE_SEM() console_lock()
#define NV_RELEASE_CONSOLE_SEM() console_unlock()
-#else
-#error "console lock api unrecognized!."
-#endif
/*
* If the host OS has page sizes larger than 4KB, we may have a security
@@ -1289,11 +1250,6 @@
{
NvBool is_direct = NV_FALSE;
-#if defined(NV_DMA_IS_DIRECT_PRESENT)
- if (dma_is_direct(get_dma_ops(dev)))
- is_direct = NV_TRUE;
-#endif
-
return is_direct;
}
--- a/kernel/nvidia-uvm/uvm8_va_space.c
+++ b/kernel/nvidia-uvm/uvm8_va_space.c
@@ -56,41 +56,11 @@
static NV_STATUS init_npu_context(uvm_va_space_t *va_space, uvm_gpu_va_space_t *gpu_va_space)
{
-#if defined(NV_PNV_NPU2_INIT_CONTEXT_PRESENT)
- if (uvm8_ats_mode) {
- struct npu_context *npu_context;
-
- // TODO: Bug 1896767: Add a callback here. See the comments on unsafe_mm
- // in uvm8_va_space.h.
- UVM_ASSERT(current->mm == va_space->unsafe_mm);
- uvm_assert_mmap_lock_locked_write(current->mm);
-
- npu_context = pnv_npu2_init_context(gpu_va_space->gpu->pci_dev, (MSR_DR | MSR_PR | MSR_HV), NULL, NULL);
- if (IS_ERR(npu_context)) {
- NV_STATUS status = errno_to_nv_status(PTR_ERR(npu_context));
- UVM_ERR_PRINT("pnv_npu2_init_context failed: %s (%d), GPU %s\n",
- nvstatusToString(status),
- (int)PTR_ERR(npu_context),
- gpu_va_space->gpu->name);
- return status;
- }
-
- gpu_va_space->npu_context = npu_context;
- }
-#endif
-
return NV_OK;
}
static void destroy_npu_context(uvm_gpu_va_space_t *gpu_va_space)
{
-#if defined(NV_PNV_NPU2_INIT_CONTEXT_PRESENT)
- if (uvm8_ats_mode && gpu_va_space->npu_context) {
- // TODO: Bug 1896767: See the comments on unsafe_mm in uvm8_va_space.h
- pnv_npu2_destroy_context(gpu_va_space->npu_context, gpu_va_space->gpu->pci_dev);
- gpu_va_space->npu_context = NULL;
- }
-#endif
}
static NV_STATUS register_gpu_nvlink_peers(uvm_va_space_t *va_space, uvm_gpu_t *gpu)
@@ -171,15 +141,6 @@
uvm_processor_mask_set(&va_space->system_wide_atomics_enabled_processors, UVM_CPU_ID);
uvm_processor_mask_set(&va_space->faultable_processors, UVM_CPU_ID);
-#if defined(NV_PNV_NPU2_INIT_CONTEXT_PRESENT)
- if (uvm8_ats_mode) {
- // TODO: Bug 1896767: Be as retrictive as possible when using
- // unsafe_mm. See the comments on unsafe_mm in
- // uvm8_va_space.h.
- va_space->unsafe_mm = current->mm;
- }
-#endif
-
filp->private_data = va_space;
filp->f_mapping = &va_space->mapping;
--- a/kernel/nvidia-uvm/uvm8.c
+++ b/kernel/nvidia-uvm/uvm8.c
@@ -658,7 +658,7 @@
// Using VM_DONTCOPY would be nice, but madvise(MADV_DOFORK) can reset that
// so we have to handle vm_open on fork anyway. We could disable MADV_DOFORK
// with VM_IO, but that causes other mapping issues.
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vm_flags_set( vma, VM_MIXEDMAP | VM_DONTEXPAND );
vma->vm_ops = &uvm_vm_ops_managed;
--- a/kernel/nvidia-uvm/uvm8_lock.h
+++ b/kernel/nvidia-uvm/uvm8_lock.h
@@ -898,7 +898,7 @@
{
int res;
- res = UVM_WAIT_ON_BIT_LOCK(bit_locks->bits, bit, TASK_UNINTERRUPTIBLE);
+ res = UVM_WAIT_ON_BIT(bit_locks->bits, bit, TASK_UNINTERRUPTIBLE);
UVM_ASSERT_MSG(res == 0, "Uninterruptible task interrupted: %d\n", res);
uvm_assert_bit_locked(bit_locks, bit);
}
--- a/kernel/nvidia-uvm/uvm8_gpu.c
+++ b/kernel/nvidia-uvm/uvm8_gpu.c
@@ -2214,15 +2214,15 @@
NV_STATUS uvm_gpu_map_cpu_pages(uvm_gpu_t *gpu, struct page *page, size_t size, NvU64 *dma_addr_out)
{
- NvU64 dma_addr = pci_map_page(gpu->pci_dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
+ NvU64 dma_addr = dma_map_page(&((struct pci_dev *)gpu->pci_dev)->dev, page, 0, size, DMA_BIDIRECTIONAL);
UVM_ASSERT(PAGE_ALIGNED(size));
- if (NV_PCI_DMA_MAPPING_ERROR(gpu->pci_dev, dma_addr))
+ if (NV_DMA_MAPPING_ERROR(&((struct pci_dev *)gpu->pci_dev)->dev, dma_addr))
return NV_ERR_OPERATING_SYSTEM;
if (dma_addr < gpu->dma_addressable_start || dma_addr + size - 1 > gpu->dma_addressable_limit) {
- pci_unmap_page(gpu->pci_dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&((struct pci_dev *)gpu->pci_dev)->dev, dma_addr, size, DMA_BIDIRECTIONAL);
UVM_ERR_PRINT_RL("PCI mapped range [0x%llx, 0x%llx) not in the addressable range [0x%llx, 0x%llx), GPU %s\n",
dma_addr,
dma_addr + (NvU64)size,
@@ -2256,7 +2256,7 @@
if (gpu->npu_dev)
dma_address = nv_expand_nvlink_addr(dma_address);
dma_address += gpu->dma_addressable_start;
- pci_unmap_page(gpu->pci_dev, dma_address, size, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&((struct pci_dev *)gpu->pci_dev)->dev, dma_address, size, DMA_BIDIRECTIONAL);
atomic64_sub(size, &gpu->mapped_cpu_pages_size);
}
--- a/kernel/nvidia-uvm/uvm8_global.c
+++ b/kernel/nvidia-uvm/uvm8_global.c
@@ -70,14 +70,8 @@
NV_STATUS status = NV_ERR_GENERIC;
if (uvm8_ats_mode) {
- #if defined(NV_PNV_NPU2_INIT_CONTEXT_PRESENT)
- pr_info("UVM ATS is enabled but not yet fully supported. "
- "All GPU applications even if they don't use ATS must cleanly stop GPU work before exiting (no ctrl-c) "
- "or risk crashing the system.");
- #else
pr_info("This platform does not support ATS. Ignoring uvm8_ats_mode.\n");
uvm8_ats_mode = 0;
- #endif
}
status = uvm_kvmalloc_init();
--- a/kernel/nvidia-uvm/uvm_linux.h
+++ b/kernel/nvidia-uvm/uvm_linux.h
@@ -66,10 +66,6 @@
#include <linux/ratelimit.h>
#endif
-#if defined(NV_PNV_NPU2_INIT_CONTEXT_PRESENT)
-#include <asm/powernv.h>
-#endif
-
#if defined(NV_LINUX_SCHED_TASK_STACK_H_PRESENT)
#include <linux/sched/task_stack.h>
#endif
@@ -90,6 +86,8 @@
#define UVM_IS_CONFIG_HMM() 0
#endif
+#include <linux/types.h>
+
// See bug 1707453 for further details about setting the minimum kernel version.
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
# error This driver does not support kernels older than 2.6.32!
@@ -267,18 +265,10 @@
#define nv_copy_from_user copy_from_user
#endif
-#if defined(NV_ATOMIC64_PRESENT)
typedef atomic64_t NV_ATOMIC64;
#define NV_ATOMIC64_INC(data) atomic64_inc(&(data))
#define NV_ATOMIC64_SET(data,val) atomic64_set(&(data), (val))
#define NV_ATOMIC64_READ(data) atomic64_read(&(data))
-#else
-#warning "atomic64_t unavailable, demoting to atomic_t!"
-typedef atomic_t NV_ATOMIC64;
-#define NV_ATOMIC64_INC(data) atomic_inc(&(data))
-#define NV_ATOMIC64_SET(data,val) atomic_set(&(data), (val))
-#define NV_ATOMIC64_READ(data) atomic_read(&(data))
-#endif
#ifndef NV_ALIGN_DOWN
#define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1))
@@ -469,26 +459,8 @@
#endif
// Changed in 3.17 via commit 743162013d40ca612b4cb53d3a200dff2d9ab26e
-#if (NV_WAIT_ON_BIT_LOCK_ARGUMENT_COUNT == 3)
- #define UVM_WAIT_ON_BIT_LOCK(word, bit, mode) \
- wait_on_bit_lock(word, bit, mode)
-#elif (NV_WAIT_ON_BIT_LOCK_ARGUMENT_COUNT == 4)
- static __sched int uvm_bit_wait(void *word)
- {
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0))
- if (signal_pending_state(current->state, current))
-#else
- if (signal_pending_state(current->__state, current))
-#endif
- return 1;
- schedule();
- return 0;
- }
- #define UVM_WAIT_ON_BIT_LOCK(word, bit, mode) \
- wait_on_bit_lock(word, bit, uvm_bit_wait, mode)
-#else
-#error "Unknown number of arguments"
-#endif
+ #define UVM_WAIT_ON_BIT(word, bit, mode) \
+ wait_on_bit(word, bit, mode)
static void uvm_init_radix_tree_preloadable(struct radix_tree_root *tree)
{
--- a/kernel/nvidia-uvm/uvm8_va_space.h
+++ b/kernel/nvidia-uvm/uvm8_va_space.h
@@ -116,11 +116,6 @@
// accessed across dropping and re-acquiring the VA space lock.
nv_kref_t kref;
-#if defined(NV_PNV_NPU2_INIT_CONTEXT_PRESENT)
- // IBM NPU contexts
- struct npu_context *npu_context;
-#endif // NV_PNV_NPU2_INIT_CONTEXT_PRESENT
-
// Each GPU VA space can have ATS enabled or disabled in its hardware state.
// This is controlled by user space when it allocates that GPU VA space
// object from RM. This flag indicates the mode user space requested when
@@ -275,25 +270,6 @@
bool test_page_prefetch_enabled;
-#if defined(NV_PNV_NPU2_INIT_CONTEXT_PRESENT)
- // TODO: Bug 1896767: This is an unsafe temporary ATS bringup hack to
- // unblock testing while we get the proper fix in place.
- //
- // This field tracks the mm used to create the uvm_va_space_t. It
- // is used by the GPU ATS fault handler to service faults.
- // However, we aren't yet registering a callback with
- // pnv_npu2_init_context, which means that the GPU may continue
- // this mm's PASID for ATS translations, and that the GPU fault
- // handler may attempt to use this mm_struct after it has been
- // torn down. Either of these issues may lead to a system crash.
- //
- // Until we can handle this properly, if uvm8_ats_mode is enabled
- // we require that all tests exit cleanly after all their GPU work
- // has completed (no ctrl-c for example). This is true even for
- // tests which do not use ATS.
- struct mm_struct *unsafe_mm;
-#endif
-
#if UVM_IS_CONFIG_HMM()
// HMM information about this VA space.
uvm_hmm_va_space_t hmm_va_space;
--- a/kernel/nvidia-uvm/uvm8_gpu_replayable_faults.c
+++ b/kernel/nvidia-uvm/uvm8_gpu_replayable_faults.c
@@ -1288,159 +1288,19 @@
FAULT_SERVICE_MODE_CANCEL,
} fault_service_mode_t;
-#if defined(NV_PNV_NPU2_INIT_CONTEXT_PRESENT)
-static NV_STATUS uvm_ats_service_fault_entry(uvm_gpu_t *gpu,
- NvU32 entry_index,
- uvm_fault_service_batch_context_t *batch_context)
-{
- uvm_gpu_va_space_t *gpu_va_space;
- uvm_fault_buffer_entry_t *previous_entry = NULL;
- uvm_fault_buffer_entry_t *current_entry = batch_context->ordered_fault_cache[entry_index];
- uvm_fault_utlb_info_t *utlb = &batch_context->utlbs[current_entry->fault_source.utlb_id];
- unsigned long flags;
- uintptr_t addr;
- unsigned long fault_status = 0;
- NV_STATUS status = NV_OK;
- bool is_duplicate = false;
- uvm_fault_access_type_t service_access_type;
-
- UVM_ASSERT(current_entry->fault_access_type ==
- uvm_fault_access_type_mask_highest(current_entry->access_type_mask));
-
- if (entry_index > 0)
- previous_entry = batch_context->ordered_fault_cache[entry_index - 1];
-
- // This is a bringup driver, so do some verbose printing:
- uvm_hal_print_fault_entry(current_entry);
-
- // Service the most intrusive fault per page, only. Waive the rest.
- if ((entry_index > 0) && (current_entry->va_space == previous_entry->va_space) &&
- (current_entry->fault_address == previous_entry->fault_address)) {
- // Propagate the is_invalid_prefetch flag across all prefetch faults on the page
- if (previous_entry->is_invalid_prefetch)
- current_entry->is_invalid_prefetch = true;
-
- // If a page is throttled, all faults on the page must be skipped
- if (previous_entry->is_throttled)
- current_entry->is_throttled = true;
-
- is_duplicate = true;
- }
-
- // Generate fault events for all fault packets
- uvm_perf_event_notify_gpu_fault(&current_entry->va_space->perf_events,
- NULL,
- gpu->id,
- current_entry,
- 0,
- is_duplicate);
-
- // The previous fault was non-fatal so the page has been already serviced
- if (is_duplicate && !previous_entry->is_fatal)
- goto skip_fault;
-
- // TODO: Bug 1896767: Combine ATS and non-ATS fault servicing so we can
- // handle both types of faults in the same GPU VA space.
- gpu_va_space = uvm_gpu_va_space_get(current_entry->va_space, gpu);
-
- service_access_type = current_entry->fault_access_type;
-
- // TODO: Bug 1896767: Service more than a single fault at a time
- flags = (unsigned long)((service_access_type >= UVM_FAULT_ACCESS_TYPE_WRITE) ? NPU2_WRITE : 0);
- addr = (uintptr_t)current_entry->fault_address;
- if (gpu_va_space->ats_enabled) {
- status = errno_to_nv_status(pnv_npu2_handle_fault(gpu_va_space->npu_context, &addr, &flags, &fault_status, 1));
- }
- else {
- pr_info_ratelimited("Support is not yet implemented for faults in non-ATS GPU VA spaces "
- "when loading with uvm8_ats_mode=1, cancelling.\n");
- status = NV_ERR_INVALID_ADDRESS;
- }
-
- // Do not flag prefetch faults as fatal unless something fatal happened
- if (status == NV_ERR_INVALID_ADDRESS) {
- if (current_entry->fault_access_type != UVM_FAULT_ACCESS_TYPE_PREFETCH) {
- current_entry->is_fatal = true;
- current_entry->fatal_reason = uvm_tools_status_to_fatal_fault_reason(status);
-
- if (service_access_type == UVM_FAULT_ACCESS_TYPE_READ)
- current_entry->cancel_va_mode = UVM_FAULT_CANCEL_VA_MODE_ALL;
- else
- current_entry->cancel_va_mode = UVM_FAULT_CANCEL_VA_MODE_WRITE_AND_ATOMIC;
-
- // If there are pending read accesses on the same page, we have to
- // service them before we can cancel the write/atomic faults. So we
- // retry with read fault access type.
- if (gpu_va_space->ats_enabled &&
- current_entry->fault_access_type > UVM_FAULT_ACCESS_TYPE_READ &&
- uvm_fault_access_type_mask_test(current_entry->access_type_mask, UVM_FAULT_ACCESS_TYPE_READ)) {
- flags = 0;
- status = errno_to_nv_status(pnv_npu2_handle_fault(gpu_va_space->npu_context,
- &addr,
- &flags,
- &fault_status,
- 1));
- // If read accesses also fail,
- if (status != NV_OK)
- current_entry->cancel_va_mode = UVM_FAULT_CANCEL_VA_MODE_ALL;
- }
- }
- else {
- current_entry->is_invalid_prefetch = true;
- }
-
- // Do not fail overall fault servicing due to logical errors
- status = NV_OK;
- }
-
-skip_fault:
- if (current_entry->is_invalid_prefetch)
- batch_context->num_invalid_prefetch_faults += current_entry->num_instances;
-
- batch_context->num_duplicate_faults += current_entry->num_instances - 1;
-
- if (current_entry->is_throttled)
- batch_context->has_throttled_faults = true;
-
- if (current_entry->is_fatal) {
- utlb->has_fatal_faults = true;
- batch_context->has_fatal_faults = true;
- }
-
- return status;
-}
-#endif
-
static void uvm_down_read_mmap_lock_unsafe(uvm_va_space_t *va_space)
{
-#if defined(NV_PNV_NPU2_INIT_CONTEXT_PRESENT)
- // TODO: Bug 1896767: See the comments on unsafe_mm in uvm8_va_space.h.
- // We can only get here when loaded in ATS mode (uvm8_ats_mode=1).
- if (va_space->unsafe_mm)
- uvm_down_read_mmap_lock(va_space->unsafe_mm);
-#endif
}
static void uvm_up_read_mmap_lock_unsafe(uvm_va_space_t *va_space)
{
-#if defined(NV_PNV_NPU2_INIT_CONTEXT_PRESENT)
- // TODO: Bug 1896767: See the comments on unsafe_mm in uvm8_va_space.h.
- // We can only get here when loaded in ATS mode (uvm8_ats_mode=1).
- if (va_space->unsafe_mm)
- uvm_up_read_mmap_lock(va_space->unsafe_mm);
-#endif
}
static NV_STATUS service_ats_fault_entry(uvm_gpu_t *gpu,
NvU32 entry_index,
uvm_fault_service_batch_context_t *batch_context)
{
-#if defined(NV_PNV_NPU2_INIT_CONTEXT_PRESENT)
- if (uvm8_ats_mode)
- return uvm_ats_service_fault_entry(gpu, entry_index, batch_context);
-#else
UVM_ASSERT(!uvm8_ats_mode);
-#endif
return NV_ERR_NOT_SUPPORTED;
}
--- a/kernel/conftest.sh
+++ b/kernel/conftest.sh
@@ -1333,7 +1333,7 @@
echo "$CONFTEST_PREAMBLE
#include <linux/pci.h>
int conftest_pci_dma_mapping_error(void) {
- return pci_dma_mapping_error(NULL, 0);
+ return dma_mapping_error(NULL, 0);
}" > conftest$$.c
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
@@ -2769,9 +2769,9 @@
# 2014-07-07 743162013d40ca612b4cb53d3a200dff2d9ab26e
#
echo "$CONFTEST_PREAMBLE
- #include <linux/wait.h>
+ #include <linux/wait_bit.h>
void conftest_wait_on_bit_lock(void) {
- wait_on_bit_lock(NULL, 0, 0);
+ wait_on_bit(NULL, 0, 0);
}" > conftest$$.c
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
--- a/kernel/nvidia-drm/nvidia-drm-connector.c
+++ b/kernel/nvidia-drm/nvidia-drm-connector.c
@@ -98,7 +98,7 @@
break;
}
- if (connector->override_edid) {
+ if (connector->edid_override) {
const struct drm_property_blob *edid = connector->edid_blob_ptr;
if (edid->length <= sizeof(pDetectParams->edid.buffer)) {
@@ -212,9 +212,7 @@
}
static struct drm_connector_funcs nv_connector_funcs = {
-#if defined NV_DRM_ATOMIC_HELPER_CONNECTOR_DPMS_PRESENT
- .dpms = drm_atomic_helper_connector_dpms,
-#endif
+ .dpms = drm_helper_connector_dpms,
.destroy = nv_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.force = __nv_drm_connector_force,
--- a/kernel/nvidia-drm/nvidia-drm-encoder.c
+++ b/kernel/nvidia-drm/nvidia-drm-encoder.c
@@ -341,9 +341,6 @@
* On kernels where DRM has a primary group, we need to reinitialize
* after adding encoders and connectors.
*/
-#if defined(NV_DRM_REINIT_PRIMARY_MODE_GROUP_PRESENT)
- drm_reinit_primary_mode_group(dev);
-#endif
drm_kms_helper_hotplug_event(dev);
}
--- a/kernel/nvidia-drm/nvidia-drm-fb.c
+++ b/kernel/nvidia-drm/nvidia-drm-fb.c
@@ -31,6 +31,7 @@
#include "nvidia-drm-gem.h"
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper.h>
static void nv_drm_framebuffer_destroy(struct drm_framebuffer *fb)
{
--- a/kernel/nvidia-drm/nvidia-drm-gem.h
+++ b/kernel/nvidia-drm/nvidia-drm-gem.h
@@ -86,27 +86,13 @@
static inline void
nv_drm_gem_object_unreference_unlocked(struct nv_drm_gem_object *nv_gem)
{
-#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
-
-#if defined(NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT)
- drm_gem_object_put_unlocked(&nv_gem->base);
-#else
drm_gem_object_put(&nv_gem->base);
-#endif
-
-#else
- drm_gem_object_unreference_unlocked(&nv_gem->base);
-#endif
}
static inline void
nv_drm_gem_object_unreference(struct nv_drm_gem_object *nv_gem)
{
-#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
drm_gem_object_put(&nv_gem->base);
-#else
- drm_gem_object_unreference(&nv_gem->base);
-#endif
}
static inline int nv_drm_gem_handle_create_drop_reference(
--- a/kernel/nvidia-drm/nvidia-drm-gem.c
+++ b/kernel/nvidia-drm/nvidia-drm-gem.c
@@ -51,10 +51,8 @@
nv_gem->ops->free(nv_gem);
}
-#if !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS) && \
- defined(NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG)
static int nv_drm_gem_vmap(struct drm_gem_object *gem,
- struct dma_buf_map *map)
+ struct iosys_map *map)
{
map->vaddr = nv_drm_gem_prime_vmap(gem);
if (map->vaddr == NULL) {
@@ -65,12 +63,11 @@
}
static void nv_drm_gem_vunmap(struct drm_gem_object *gem,
- struct dma_buf_map *map)
+ struct iosys_map *map)
{
nv_drm_gem_prime_vunmap(gem, map->vaddr);
map->vaddr = NULL;
}
-#endif
#if !defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT) || \
!defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS)
@@ -80,13 +77,8 @@
#if !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS)
.export = nv_drm_gem_prime_export,
-#if defined(NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG)
.vmap = nv_drm_gem_vmap,
.vunmap = nv_drm_gem_vunmap,
-#else
- .vmap = nv_drm_gem_prime_vmap,
- .vunmap = nv_drm_gem_prime_vunmap,
-#endif
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
.vm_ops = &nv_drm_gem_vma_ops,
--- a/kernel/nvidia-drm/nvidia-drm-drv.c
+++ b/kernel/nvidia-drm/nvidia-drm-drv.c
@@ -242,7 +242,7 @@
/* Currently unused. Update when needed. */
- dev->mode_config.fb_base = 0;
+ //dev->mode_config.fb_base = 0;
dev->mode_config.async_page_flip = false;
@@ -653,26 +653,26 @@
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IMPORT_NVKMS_MEMORY,
nv_drm_gem_import_nvkms_memory_ioctl,
- DRM_UNLOCKED),
+ 0),
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IMPORT_USERSPACE_MEMORY,
nv_drm_gem_import_userspace_memory_ioctl,
- DRM_RENDER_ALLOW|DRM_UNLOCKED),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NVIDIA_GET_DEV_INFO,
nv_drm_get_dev_info_ioctl,
- DRM_RENDER_ALLOW|DRM_UNLOCKED),
+ DRM_RENDER_ALLOW),
#if defined(NV_DRM_FENCE_AVAILABLE)
DRM_IOCTL_DEF_DRV(NVIDIA_FENCE_SUPPORTED,
nv_drm_fence_supported_ioctl,
- DRM_RENDER_ALLOW|DRM_UNLOCKED),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NVIDIA_FENCE_CONTEXT_CREATE,
nv_drm_fence_context_create_ioctl,
- DRM_RENDER_ALLOW|DRM_UNLOCKED),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NVIDIA_GEM_FENCE_ATTACH,
nv_drm_gem_fence_attach_ioctl,
- DRM_RENDER_ALLOW|DRM_UNLOCKED),
+ DRM_RENDER_ALLOW),
#endif
DRM_IOCTL_DEF_DRV(NVIDIA_GET_CLIENT_CAPABILITY,
@@ -681,7 +681,7 @@
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
DRM_IOCTL_DEF_DRV(NVIDIA_GET_CRTC_CRC32,
nv_drm_get_crtc_crc32_ioctl,
- DRM_RENDER_ALLOW|DRM_UNLOCKED),
+ DRM_RENDER_ALLOW),
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
};
@@ -700,7 +700,7 @@
.ioctls = nv_drm_ioctls,
.num_ioctls = ARRAY_SIZE(nv_drm_ioctls),
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_handle_to_fd = NULL,
#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS)
.gem_prime_export = nv_drm_gem_prime_export,
@@ -761,7 +761,7 @@
nv_drm_driver.dumb_create = nv_drm_dumb_create;
nv_drm_driver.dumb_map_offset = nv_drm_dumb_map_offset;
- nv_drm_driver.dumb_destroy = nv_drm_dumb_destroy;
+ //nv_drm_driver.dumb_destroy = nv_drm_dumb_destroy;
#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS)
nv_drm_driver.gem_vm_ops = &nv_drm_gem_vma_ops;
--- a/kernel/nvidia-drm/nvidia-dma-resv-helper.h
+++ b/kernel/nvidia-drm/nvidia-dma-resv-helper.h
@@ -69,7 +69,7 @@
nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
- dma_resv_add_excl_fence(obj, fence);
+ dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
#else
reservation_object_add_excl_fence(obj, fence);
#endif
--- a/kernel/nvidia-drm/nvidia-drm-helper.c
+++ b/kernel/nvidia-drm/nvidia-drm-helper.c
@@ -39,6 +39,8 @@
#include <drm/drm_atomic_uapi.h>
#endif
+#include <drm/drm_framebuffer.h>
+
static void __nv_drm_framebuffer_put(struct drm_framebuffer *fb)
{
#if defined(NV_DRM_FRAMEBUFFER_GET_PRESENT)
--- a/kernel/nvidia-drm/nvidia-drm-helper.h
+++ b/kernel/nvidia-drm/nvidia-drm-helper.h
@@ -391,22 +391,14 @@
nv_drm_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
-#if defined(NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME)
- return drm_mode_connector_attach_encoder(connector, encoder);
-#else
return drm_connector_attach_encoder(connector, encoder);
-#endif
}
static inline int
nv_drm_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid)
{
-#if defined(NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME)
- return drm_mode_connector_update_edid_property(connector, edid);
-#else
return drm_connector_update_edid_property(connector, edid);
-#endif
}
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
--- a/kernel/nvidia/os-interface.c
+++ b/kernel/nvidia/os-interface.c
@@ -444,13 +444,8 @@
void NV_API_CALL os_get_current_tick(NvU64 *nseconds)
{
-#if defined(NV_JIFFIES_TO_TIMESPEC_PRESENT)
- struct timespec ts;
- jiffies_to_timespec(jiffies, &ts);
-#else
struct timespec64 ts;
jiffies_to_timespec64(jiffies, &ts);
-#endif
*nseconds = ((NvU64)ts.tv_sec * NSEC_PER_SEC + (NvU64)ts.tv_nsec);
}
@@ -849,7 +844,7 @@
break;
case NV_MEMORY_UNCACHED:
case NV_MEMORY_DEFAULT:
- vaddr = nv_ioremap_nocache(start, size_bytes);
+ vaddr = nv_ioremap(start, size_bytes);
break;
default:
nv_printf(NV_DBG_ERRORS,
--- a/kernel/nvidia/os-mlock.c
+++ b/kernel/nvidia/os-mlock.c
@@ -18,12 +18,22 @@
unsigned long address,
unsigned long *pfn)
{
-#if defined(NV_UNSAFE_FOLLOW_PFN_PRESENT)
- return unsafe_follow_pfn(vma, address, pfn);
-#else
- return follow_pfn(vma, address, pfn);
-#endif
-}
+ int status = 0;
+ spinlock_t *ptl;
+ pte_t *ptep;
+
+ if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+ return status;
+
+ status = follow_pte(vma, address, &ptep, &ptl);
+ if (status)
+ return status;
+ *pfn = pte_pfn(ptep_get(ptep));
+
+ // The lock is acquired inside follow_pte()
+ pte_unmap_unlock(ptep, ptl);
+ return 0;
+}
NV_STATUS NV_API_CALL os_lookup_user_io_memory(
void *address,
--- a/kernel/nvidia/nv-acpi.c
+++ b/kernel/nvidia/nv-acpi.c
@@ -24,7 +24,7 @@
static int nv_acpi_add (struct acpi_device *);
#if !defined(NV_ACPI_DEVICE_OPS_REMOVE_ARGUMENT_COUNT) || (NV_ACPI_DEVICE_OPS_REMOVE_ARGUMENT_COUNT == 2)
-static int nv_acpi_remove_two_args(struct acpi_device *device, int type);
+static void nv_acpi_remove_two_args(struct acpi_device *device);
#else
static int nv_acpi_remove_one_arg(struct acpi_device *device);
#endif
@@ -215,6 +215,24 @@
return 0;
}
+LIST_HEAD( acpi_children );
+
+struct acpi_child
+{ struct list_head node;
+ struct acpi_device data;
+};
+
+static
+int
+acpi_add_child( struct acpi_device *dev
+, void *data
+){ struct acpi_child *node = kmalloc( sizeof( struct acpi_child ), GFP_KERNEL );
+ node->data = *dev;
+ INIT_LIST_HEAD( &node->node );
+ list_add_tail( &node->node, &acpi_children );
+ return 0;
+}
+
static int nv_acpi_add(struct acpi_device *device)
{
/*
@@ -230,6 +248,7 @@
struct list_head *node, *next;
nv_acpi_integer_t device_id = 0;
int device_counter = 0;
+ struct acpi_child *entry, *temp;
status = nv_kmem_cache_alloc_stack(&sp);
if (status != 0)
@@ -256,10 +275,11 @@
// grab handles to all the important nodes representing devices
- list_for_each_safe(node, next, &device->children)
+ acpi_dev_for_each_child( device, acpi_add_child, 0 );
+ list_for_each_safe(node, next, &acpi_children)
{
struct acpi_device *dev =
- list_entry(node, struct acpi_device, node);
+ &list_entry(node, struct acpi_child, node)->data;
if (!dev)
continue;
@@ -297,6 +317,11 @@
device_counter++;
}
+ list_for_each_entry_safe( entry, temp, &acpi_children, node )
+ { list_del( &entry->node );
+ kfree(entry);
+ }
+
// arg 0, bits 1:0, 0 = enable events
control_argument_0.integer.type = ACPI_TYPE_INTEGER;
@@ -333,7 +358,7 @@
}
#if !defined(NV_ACPI_DEVICE_OPS_REMOVE_ARGUMENT_COUNT) || (NV_ACPI_DEVICE_OPS_REMOVE_ARGUMENT_COUNT == 2)
-static int nv_acpi_remove_two_args(struct acpi_device *device, int type)
+static void nv_acpi_remove_two_args(struct acpi_device *device)
#else
static int nv_acpi_remove_one_arg(struct acpi_device *device)
#endif
@@ -385,8 +410,6 @@
module_put(THIS_MODULE);
device->driver_data = NULL;
}
-
- return status;
}
static void nv_acpi_event(acpi_handle handle, u32 event_type, void *data)
@@ -590,7 +613,6 @@
{
struct acpi_device *device = NULL;
NV_STATUS rmStatus;
- int retVal = -1;
if (!handlesPresent) // Caller passed us invalid pointer.
@@ -610,9 +632,9 @@
if (!nvif_parent_gpu_handle) /* unknown error */
break;
- retVal = acpi_bus_get_device(nvif_parent_gpu_handle, &device);
+ device = acpi_get_acpi_dev(nvif_parent_gpu_handle);
- if (ACPI_FAILURE(retVal) || !device)
+ if (!device)
break;
if (device->driver_data)
@@ -684,7 +706,7 @@
if (nvif_parent_gpu_handle == NULL)
return;
- acpi_bus_get_device(nvif_parent_gpu_handle, &device);
+ device = acpi_get_acpi_dev(nvif_parent_gpu_handle);
nv_uninstall_notifier(device, nv_acpi_event);
nvif_parent_gpu_handle = NULL;
@@ -1237,6 +1259,7 @@
NvU32 i;
acpi_handle dev_handle = NULL;
acpi_handle lcd_dev_handle = NULL;
+ struct acpi_child *entry, *temp;
if (!nv_acpi_get_device_handle(nv, &dev_handle))
return NV_ERR_NOT_SUPPORTED;
@@ -1244,9 +1267,9 @@
if (!dev_handle)
return NV_ERR_INVALID_ARGUMENT;
- status = acpi_bus_get_device(dev_handle, &device);
+ device = acpi_get_acpi_dev(dev_handle);
- if (ACPI_FAILURE(status) || !device)
+ if (!device)
return NV_ERR_INVALID_ARGUMENT;
if (!NV_MAY_SLEEP())
@@ -1259,10 +1282,11 @@
return NV_ERR_NOT_SUPPORTED;
}
- list_for_each_safe(node, next, &device->children)
+ acpi_dev_for_each_child( device, acpi_add_child, 0 );
+ list_for_each_safe(node, next, &acpi_children)
{
struct acpi_device *dev =
- list_entry(node, struct acpi_device, node);
+ &list_entry(node, struct acpi_child, node)->data;
if (!dev)
continue;
@@ -1282,6 +1306,10 @@
}
}
+ list_for_each_entry_safe( entry, temp, &acpi_children, node )
+ { list_del( &entry->node );
+ kfree(entry);
+ }
if (lcd_dev_handle == NULL)
{
--- a/kernel/nvidia/nv-mmap.c
+++ b/kernel/nvidia/nv-mmap.c
@@ -433,7 +433,7 @@
addr = mmap_start;
// Needed for the linux kernel for mapping compound pages
- vma->vm_flags |= VM_MIXEDMAP;
+ vm_flags_set( vma, VM_MIXEDMAP );
for (j = 0; j < pages; j++)
{
@@ -457,7 +457,7 @@
}
}
- vma->vm_flags |= VM_IO;
+ vm_flags_set( vma, VM_IO );
}
else
{
@@ -519,15 +519,15 @@
NV_PRINT_AT(NV_DBG_MEMINFO, at);
- vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED);
- vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
+ vm_flags_set( vma, VM_IO | VM_LOCKED | VM_RESERVED );
+ vm_flags_set( vma, VM_DONTEXPAND | VM_DONTDUMP );
}
if ((prot & NV_PROTECT_WRITEABLE) == 0)
{
vma->vm_page_prot = NV_PGPROT_READ_ONLY(vma->vm_page_prot);
- vma->vm_flags &= ~VM_WRITE;
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_clear( vma, VM_WRITE );
+ vm_flags_clear( vma, VM_MAYWRITE );
}
vma->vm_ops = &nv_vm_ops;
--- a/kernel/nvidia/nv-vm.c
+++ b/kernel/nvidia/nv-vm.c
@@ -81,14 +81,12 @@
{
switch (type)
{
-#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
case NV_MEMORY_UNCACHED:
- set_memory_array_uc(pages, num_pages);
+ set_memory_uc(( unsigned long )pages, num_pages);
break;
case NV_MEMORY_WRITEBACK:
- set_memory_array_wb(pages, num_pages);
+ set_memory_wb(( unsigned long )pages, num_pages);
break;
-#endif
default:
nv_printf(NV_DBG_ERRORS,
"NVRM: %s(): type %d unimplemented\n",
--- a/kernel/nvidia/nv-dma.c
+++ b/kernel/nvidia/nv-dma.c
@@ -27,9 +27,9 @@
NvU64 *va
)
{
- *va = pci_map_page(dma_map->dev, dma_map->pages[0], 0,
- dma_map->page_count * PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (NV_PCI_DMA_MAPPING_ERROR(dma_map->dev, *va))
+ *va = dma_map_page(&((struct pci_dev *)dma_map->dev)->dev, dma_map->pages[0], 0,
+ dma_map->page_count * PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (NV_DMA_MAPPING_ERROR(&((struct pci_dev *)dma_map->dev)->dev, *va))
{
return NV_ERR_OPERATING_SYSTEM;
}
@@ -57,8 +57,8 @@
static void nv_dma_unmap_contig(nv_dma_map_t *dma_map)
{
- pci_unmap_page(dma_map->dev, dma_map->mapping.contig.dma_addr,
- dma_map->page_count * PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&((struct pci_dev *)dma_map->dev)->dev, dma_map->mapping.contig.dma_addr,
+ dma_map->page_count * PAGE_SIZE, DMA_BIDIRECTIONAL);
}
static void nv_fill_scatterlist
@@ -166,10 +166,10 @@
NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i)
{
- submap->sg_map_count = pci_map_sg(dma_map->dev,
+ submap->sg_map_count = dma_map_sg(&((struct pci_dev *)dma_map->dev)->dev,
NV_DMA_SUBMAP_SCATTERLIST(submap),
NV_DMA_SUBMAP_SCATTERLIST_LENGTH(submap),
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
if (submap->sg_map_count == 0)
{
status = NV_ERR_OPERATING_SYSTEM;
@@ -197,9 +197,9 @@
break;
}
- pci_unmap_sg(dma_map->dev, NV_DMA_SUBMAP_SCATTERLIST(submap),
+ dma_unmap_sg(&((struct pci_dev *)dma_map->dev)->dev, NV_DMA_SUBMAP_SCATTERLIST(submap),
NV_DMA_SUBMAP_SCATTERLIST_LENGTH(submap),
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
}
}
--- a/kernel/nvidia/nv.c
+++ b/kernel/nvidia/nv.c
@@ -2742,7 +2742,7 @@
if (!nvl->tce_bypass_enabled)
{
NvU64 new_mask = (((NvU64)1) << phys_addr_bits) - 1;
- pci_set_dma_mask(nvl->dev, new_mask);
+ dma_set_mask(&((struct pci_dev *)nvl->dev)->dev, new_mask);
}
}