본문 바로가기

IT

참고 자료: 리눅스 개발자 커널 4.10용 엔비디아 378.09 베타 드라이버 패치

diff -urN nvidia-378-378.09/common/inc/nv-linux.h nvidia-378-378.09-patched/common/inc/nv-linux.h
--- nvidia-378-378.09/common/inc/nv-linux.h     2017-01-19 15:50:30.000000000 +0900
+++ nvidia-378-378.09-patched/common/inc/nv-linux.h     2017-01-19 16:17:44.502340096 +0900
@@ -294,7 +294,8 @@
 
 extern int nv_pat_mode;
 
-#if defined(CONFIG_HOTPLUG_CPU)
+//#if defined(CONFIG_HOTPLUG_CPU)
+#if 0
 #define NV_ENABLE_HOTPLUG_CPU
 #include <linux/cpu.h>      /* CPU hotplug support              */
 #include <linux/notifier.h> /* struct notifier_block, etc       */
diff -urN nvidia-378-378.09/common/inc/nv-mm.h nvidia-378-378.09-patched/common/inc/nv-mm.h
--- nvidia-378-378.09/common/inc/nv-mm.h        2017-01-19 15:50:30.000000000 +0900
+++ nvidia-378-378.09-patched/common/inc/nv-mm.h        2017-01-19 16:18:42.860689796 +0900
@@ -94,7 +94,7 @@
         #else
 
                return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
-                                            pages, vmas);
+                                            pages, vmas, NULL);
 
         #endif
 
diff -urN nvidia-378-378.09/nvidia/nv-p2p.c nvidia-378-378.09-patched/nvidia/nv-p2p.c
--- nvidia-378-378.09/nvidia/nv-p2p.c   2017-01-19 15:50:30.000000000 +0900
+++ nvidia-378-378.09-patched/nvidia/nv-p2p.c   2017-01-19 16:21:32.895902404 +0900
@@ -146,7 +146,7 @@
 int nvidia_p2p_get_pages(
     uint64_t p2p_token,
     uint32_t va_space,
-    uint64_t virtual_address,
+    uint64_t address,
     uint64_t length,
     struct nvidia_p2p_page_table **page_table,
     void (*free_callback)(void * data),
@@ -211,7 +211,7 @@
     }
 
     status = rm_p2p_get_pages(sp, p2p_token, va_space,
-            virtual_address, length, physical_addresses, wreqmb_h,
+            address, length, physical_addresses, wreqmb_h,
             rreqmb_h, &entries, &gpu_uuid, *page_table,
             free_callback, data);
     if (status != NV_OK)
@@ -286,7 +286,7 @@
 
     if (bGetPages)
     {
-        rm_p2p_put_pages(sp, p2p_token, va_space, virtual_address,
+        rm_p2p_put_pages(sp, p2p_token, va_space, address,
                 gpu_uuid, *page_table);
     }
 
@@ -329,7 +329,7 @@
 int nvidia_p2p_put_pages(
     uint64_t p2p_token,
     uint32_t va_space,
-    uint64_t virtual_address,
+    uint64_t address,
     struct nvidia_p2p_page_table *page_table
 )
 {
@@ -343,7 +343,7 @@
         return rc;
     }
 
-    status = rm_p2p_put_pages(sp, p2p_token, va_space, virtual_address,
+    status = rm_p2p_put_pages(sp, p2p_token, va_space, address,
             page_table->gpu_uuid, page_table);
     if (status == NV_OK)
         nvidia_p2p_free_page_table(page_table);
diff -urN nvidia-378-378.09/nvidia/nv-pat.c nvidia-378-378.09-patched/nvidia/nv-pat.c
--- nvidia-378-378.09/nvidia/nv-pat.c   2017-01-19 15:50:30.000000000 +0900
+++ nvidia-378-378.09-patched/nvidia/nv-pat.c   2017-01-19 16:26:20.759577451 +0900
@@ -217,7 +217,7 @@
             else
                 NV_SMP_CALL_FUNCTION(nv_setup_pat_entries, hcpu, 1);
             break;
-        case CPU_DOWN_PREPARE:
+        case CPU_DOWN_PREPARE_FROZEN:
             if (cpu == (NvUPtr)hcpu)
                 nv_restore_pat_entries(NULL);
             else
diff -urN nvidia-378-378.09/nvidia-drm/nvidia-drm-fence.c nvidia-378-378.09-patched/nvidia-drm/nvidia-drm-fence.c
--- nvidia-378-378.09/nvidia-drm/nvidia-drm-fence.c     2017-01-19 15:50:30.000000000 +0900
+++ nvidia-378-378.09-patched/nvidia-drm/nvidia-drm-fence.c     2017-01-19 17:25:06.951449131 +0900
@@ -31,7 +31,7 @@
 
 #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
 struct nv_fence {
-    struct fence base;
+    struct dma_fence base;
     spinlock_t lock;
 
     struct nvidia_drm_device *nv_dev;
@@ -51,7 +51,7 @@
 
 static const char *nvidia_drm_gem_prime_fence_op_get_driver_name
 (
-    struct fence *fence
+    struct dma_fence *fence
 )
 {
     return "NVIDIA";
@@ -59,7 +59,7 @@
 
 static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name
 (
-    struct fence *fence
+    struct dma_fence *fence
 )
 {
     return "nvidia.prime";
@@ -67,7 +67,7 @@
 
 static bool nvidia_drm_gem_prime_fence_op_signaled
 (
-    struct fence *fence
+    struct dma_fence *fence
 )
 {
     struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
@@ -99,7 +99,7 @@
 
 static bool nvidia_drm_gem_prime_fence_op_enable_signaling
 (
-    struct fence *fence
+    struct dma_fence *fence
 )
 {
     bool ret = true;
@@ -107,7 +107,7 @@
     struct nvidia_drm_gem_object *nv_gem = nv_fence->nv_gem;
     struct nvidia_drm_device *nv_dev = nv_fence->nv_dev;
 
-    if (fence_is_signaled(fence))
+    if (dma_fence_is_signaled(fence))
     {
         return false;
     }
@@ -132,7 +132,7 @@
     }
 
     nv_gem->fenceContext.softFence = fence;
-    fence_get(fence);
+    dma_fence_get(fence);
 
 unlock_struct_mutex:
     mutex_unlock(&nv_dev->dev->struct_mutex);
@@ -142,7 +142,7 @@
 
 static void nvidia_drm_gem_prime_fence_op_release
 (
-    struct fence *fence
+    struct dma_fence *fence
 )
 {
     struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
@@ -151,7 +151,7 @@
 
 static signed long nvidia_drm_gem_prime_fence_op_wait
 (
-    struct fence *fence,
+    struct dma_fence *fence,
     bool intr,
     signed long timeout
 )
@@ -166,12 +166,12 @@
      * that it should never get hit during normal operation, but not so long
      * that the system becomes unresponsive.
      */
-    return fence_default_wait(fence, intr,
+    return dma_fence_default_wait(fence, intr,
                               (timeout == MAX_SCHEDULE_TIMEOUT) ?
                                   msecs_to_jiffies(96) : timeout);
 }
 
-static const struct fence_ops nvidia_drm_gem_prime_fence_ops = {
+static const struct dma_fence_ops nvidia_drm_gem_prime_fence_ops = {
     .get_driver_name = nvidia_drm_gem_prime_fence_op_get_driver_name,
     .get_timeline_name = nvidia_drm_gem_prime_fence_op_get_timeline_name,
     .signaled = nvidia_drm_gem_prime_fence_op_signaled,
@@ -281,7 +281,7 @@
     bool force
 )
 {
-    struct fence *fence = nv_gem->fenceContext.softFence;
+    struct dma_fence *fence = nv_gem->fenceContext.softFence;
 
     WARN_ON(!mutex_is_locked(&nv_dev->dev->struct_mutex));
 
@@ -297,10 +297,10 @@
 
         if (force || nv_fence_ready_to_signal(nv_fence))
         {
-            fence_signal(&nv_fence->base);
+            dma_fence_signal(&nv_fence->base);
 
             nv_gem->fenceContext.softFence = NULL;
-            fence_put(&nv_fence->base);
+            dma_fence_put(&nv_fence->base);
 
             nvKms->disableChannelEvent(nv_dev->pDevice,
                                        nv_gem->fenceContext.cb);
@@ -316,7 +316,7 @@
 
         nv_fence = container_of(fence, struct nv_fence, base);
 
-        fence_signal(&nv_fence->base);
+        dma_fence_signal(&nv_fence->base);
     }
 }
 
@@ -509,7 +509,7 @@
      * fence_context_alloc() cannot fail, so we do not need to check a return
      * value.
      */
-    nv_gem->fenceContext.context = fence_context_alloc(1);
+    nv_gem->fenceContext.context = dma_fence_context_alloc(1);
 
     ret = nvidia_drm_gem_prime_fence_import_semaphore(
               nv_dev, nv_gem, p->index,
@@ -666,13 +666,13 @@
     nv_fence->nv_gem = nv_gem;
 
     spin_lock_init(&nv_fence->lock);
-    fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
+    dma_fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
                &nv_fence->lock, nv_gem->fenceContext.context,
                p->sem_thresh);
 
     reservation_object_add_excl_fence(&nv_gem->fenceContext.resv,
                                       &nv_fence->base);
-    fence_put(&nv_fence->base); /* Reservation object has reference */
+    dma_fence_put(&nv_fence->base); /* Reservation object has reference */
 
     ret = 0;
 
diff -urN nvidia-378-378.09/nvidia-drm/nvidia-drm-gem.c nvidia-378-378.09-patched/nvidia-drm/nvidia-drm-gem.c
--- nvidia-378-378.09/nvidia-drm/nvidia-drm-gem.c       2017-01-19 15:50:30.000000000 +0900
+++ nvidia-378-378.09-patched/nvidia-drm/nvidia-drm-gem.c       2017-01-19 17:09:06.914681259 +0900
@@ -510,7 +510,7 @@
 static int nvidia_drm_vma_fault(struct vm_area_struct *vma,
                                 struct vm_fault *vmf)
 {
-    unsigned long address = (unsigned long)vmf->virtual_address;
+    unsigned long address = (unsigned long)vmf->address;
     struct drm_gem_object *gem = vma->vm_private_data;
     struct nvidia_drm_gem_object *nv_gem =
                     DRM_GEM_OBJECT_TO_NV_GEM_OBJECT(gem);
diff -urN nvidia-378-378.09/nvidia-drm/nvidia-drm-gem.h nvidia-378-378.09-patched/nvidia-drm/nvidia-drm-gem.h
--- nvidia-378-378.09/nvidia-drm/nvidia-drm-gem.h       2017-01-19 15:50:30.000000000 +0900
+++ nvidia-378-378.09-patched/nvidia-drm/nvidia-drm-gem.h       2017-01-19 16:40:17.895719472 +0900
@@ -101,7 +101,7 @@
         /* Software signaling structures */
         struct NvKmsKapiChannelEvent *cb;
         struct nvidia_drm_gem_prime_soft_fence_event_args *cbArgs;
-        struct fence *softFence; /* Fence for software signaling */
+        struct dma_fence *softFence; /* Fence for software signaling */
     } fenceContext;
 #endif
 };
diff -urN nvidia-378-378.09/nvidia-drm/nvidia-drm-modeset.c nvidia-378-378.09-patched/nvidia-drm/nvidia-drm-modeset.c
--- nvidia-378-378.09/nvidia-drm/nvidia-drm-modeset.c   2017-01-19 15:50:30.000000000 +0900
+++ nvidia-378-378.09-patched/nvidia-drm/nvidia-drm-modeset.c   2017-01-19 17:27:37.323210121 +0900
@@ -633,7 +633,7 @@
         wake_up_all(&nv_dev->pending_commit_queue);
     }
 
-    nv_drm_atomic_state_free(state);
+    nv_drm_atomic_state_put(state);
 
 #if !defined(NV_DRM_MODE_CONFIG_FUNCS_HAS_ATOMIC_STATE_ALLOC)
     nvidia_drm_free(requested_config);
diff -urN nvidia-378-378.09/nvidia-drm/nvidia-drm-priv.h nvidia-378-378.09-patched/nvidia-drm/nvidia-drm-priv.h
--- nvidia-378-378.09/nvidia-drm/nvidia-drm-priv.h      2017-01-19 15:50:30.000000000 +0900
+++ nvidia-378-378.09-patched/nvidia-drm/nvidia-drm-priv.h      2017-01-19 16:51:35.440775609 +0900
@@ -34,7 +34,7 @@
 #endif
 
 #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/reservation.h>
 #endif
 
diff -urN nvidia-378-378.09/nvidia-uvm/uvm8_test.c nvidia-378-378.09-patched/nvidia-uvm/uvm8_test.c
--- nvidia-378-378.09/nvidia-uvm/uvm8_test.c    2017-01-19 15:50:30.000000000 +0900
+++ nvidia-378-378.09-patched/nvidia-uvm/uvm8_test.c    2017-01-19 16:58:05.973737462 +0900
@@ -103,7 +103,7 @@
     return NV_ERR_INVALID_STATE;
 }
 
-static NV_STATUS uvm8_test_get_kernel_virtual_address(
+static NV_STATUS uvm8_test_get_kernel_address(
         UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS_PARAMS *params,
         struct file *filp)
 {
@@ -173,7 +173,7 @@
         UVM_ROUTE_CMD_STACK(UVM_TEST_RANGE_GROUP_RANGE_COUNT,       uvm8_test_range_group_range_count);
         UVM_ROUTE_CMD_STACK(UVM_TEST_GET_PREFETCH_FAULTS_REENABLE_LAPSE, uvm8_test_get_prefetch_faults_reenable_lapse);
         UVM_ROUTE_CMD_STACK(UVM_TEST_SET_PREFETCH_FAULTS_REENABLE_LAPSE, uvm8_test_set_prefetch_faults_reenable_lapse);
-        UVM_ROUTE_CMD_STACK(UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS,    uvm8_test_get_kernel_virtual_address);
+        UVM_ROUTE_CMD_STACK(UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS,    uvm8_test_get_kernel_address);
         UVM_ROUTE_CMD_STACK(UVM_TEST_PMA_ALLOC_FREE,                uvm8_test_pma_alloc_free);
         UVM_ROUTE_CMD_STACK(UVM_TEST_PMM_ALLOC_FREE_ROOT,           uvm8_test_pmm_alloc_free_root);
         UVM_ROUTE_CMD_STACK(UVM_TEST_PMM_INJECT_PMA_EVICT_ERROR,    uvm8_test_pmm_inject_pma_evict_error);