NVIDIA-Linux-x86_64-361.16.run
Posted: Wed Jan 06, 2016 9:02 pm
Hello,
The latest version of the NVIDIA drivers (above) has modified the Unified Virtual Memory code to call NV_KMEM_CACHE_CREATE 41 times.
All the calls currently have 2 arguments. In a previous patch supplied by paxguy1, kernel/common/inc/nv-linux.h and kernel/nvidia/nv.c were modified for SLAB_USERCOPY. The 41 calls are below (formatted to fit one per line).
uvm8_range_group.c: g_uvm_range_group_cache = NV_KMEM_CACHE_CREATE("uvm_range_group_t", uvm_range_group_t);
uvm8_thread_context.c: g_uvm_thread_context_cache = NV_KMEM_CACHE_CREATE("uvm_thread_context_t", uvm_thread_context_t);
uvm8_va_block.c: g_uvm_va_block_cache = NV_KMEM_CACHE_CREATE("uvm_va_block_t", uvm_va_block_t);
uvm8_va_range.c: g_uvm_va_range_cache = NV_KMEM_CACHE_CREATE("uvm_va_range_t", uvm_va_range_t);
uvm_channel_mgmt.c: g_uvmChannelManagerCache = NV_KMEM_CACHE_CREATE("uvm_channel_manager_t", UvmChannelManager);
uvm_channel_mgmt.c: g_uvmRingbufferCache = NV_KMEM_CACHE_CREATE("uvm_ringbuffer_t", UvmRingbuffer);
uvm_channel_mgmt.c: g_uvmChannelCache = NV_KMEM_CACHE_CREATE("uvm_channel_t", UvmChannel);
uvm_channel_mgmt.c: g_uvmPushbufferCache = NV_KMEM_CACHE_CREATE("uvm_pushbuffer_t", UvmPushbuffer);
uvm_channel_mgmt.c: g_uvmTrackerCache = NV_KMEM_CACHE_CREATE("uvm_tracker_t", UvmTracker);
uvm_channel_mgmt.c: g_uvmTrackerItemCache = NV_KMEM_CACHE_CREATE("uvm_trackeritem_t", UvmTrackerItem);
uvm_full_ctx_mgmt.c: g_uvmClientChannelCache = NV_KMEM_CACHE_CREATE("uvm_channel_info", UvmClientChannel);
uvm_full_ctx_mgmt.c: g_uvmVaspaceCache = NV_KMEM_CACHE_CREATE("uvm_va_space", UvmVaspace);
uvm_full_ctx_mgmt.c: g_uvmRmMemoryCache = NV_KMEM_CACHE_CREATE("uvm_rm_memory", UvmRmMemory);
uvm_full_ctx_mgmt.c: g_uvmRmMemoryRegionCache = NV_KMEM_CACHE_CREATE("uvm_rm_memory_region", UvmRmMemoryRegion);
uvm_full_ctx_mgmt.c: g_uvmEngCtxCache = NV_KMEM_CACHE_CREATE("uvm_eng_ctx", UvmEngCtx);
uvm_full_ctx_mgmt.c: g_uvmMappingCache = NV_KMEM_CACHE_CREATE("uvm_mapping", struct address_space);
uvm_full_device_mgmt.c: g_uvmPendingFaultCache = NV_KMEM_CACHE_CREATE("uvm_gpu_pending_faults", UvmPendingFault);
uvm_full_fault_handler.c: g_uvmFaultCancelEntryCache = NV_KMEM_CACHE_CREATE("uvm_fault_cancel_info", UvmGpuCancelFaultInfo);
uvm_full_identity_map.c: g_uvmMapListNodeCache = NV_KMEM_CACHE_CREATE("uvm_mapListNode", mapList_node);
uvm_full_pa_mgmt.c: g_uvmPaManagerCache = NV_KMEM_CACHE_CREATE("uvm_pa_manager", UvmPaManager);
uvm_full_pa_mgmt.c: g_UvmPaDescCache = NV_KMEM_CACHE_CREATE("uvm_pa_desc", UvmPaDesc);
uvm_full_pa_mgmt.c: g_uvmPaToVaCache = NV_KMEM_CACHE_CREATE("uvm_pa_to_va", UvmPaToVa);
uvm_full_pagetbl_mgmt.c: g_uvm2MVaDescCache = NV_KMEM_CACHE_CREATE("uvm_2mvadesc", Uvm2MbVaDesc);
uvm_full_pagetbl_mgmt.c: g_uvmGpuPdeCache = NV_KMEM_CACHE_CREATE("uvm_gpupde", UvmGpuPde);
uvm_full_pagetbl_mgmt.c: g_uvmPbOpsCache = NV_KMEM_CACHE_CREATE("uvm_pbops", UvmPbOps);
uvm_full_va_trie.c: g_uvmVatL0Cache = NV_KMEM_CACHE_CREATE("uvm_vatL0Cache", UvmVatL0);
uvm_full_va_trie.c: g_uvmVatL1Cache = NV_KMEM_CACHE_CREATE("uvm_vatL1Cache", UvmVatL1);
uvm_full_va_trie.c: g_uvmVatL2Cache = NV_KMEM_CACHE_CREATE("uvm_vatL2Cache", UvmVatL2);
uvm_full_va_trie.c: g_uvmVatGpuHeaderCache = NV_KMEM_CACHE_CREATE("uvm_vatGpuHeaderCache", UvmVatLevelGpuHeader);
uvm_kernel_counters.c: g_UvmCounterContainerCache = NV_KMEM_CACHE_CREATE("uvm_counter_container_t", struct UvmCounterContainer_tag);
uvm_kernel_events.c: g_uvmEventContainerCache = NV_KMEM_CACHE_CREATE("uvm_event_container_t", struct UvmEventContainer_tag);
uvm_lite.c: g_uvmPrivateCache = NV_KMEM_CACHE_CREATE("uvm_private_t", struct DriverPrivate_tag);
uvm_lite.c: g_uvmCommitRecordCache = NV_KMEM_CACHE_CREATE("uvm_commit_record_t", struct UvmCommitRecord_tag);
uvm_lite.c: g_uvmMigTrackerCache = NV_KMEM_CACHE_CREATE("uvm_mig_tracker_t", struct UvmGpuMigrationTracking_tag);
uvm_lite.c: g_uvmStreamRecordCache = NV_KMEM_CACHE_CREATE("uvm_stream_record_t", struct UvmStreamRecord_tag);
uvm_lite.c: g_uvmMappingCache = NV_KMEM_CACHE_CREATE("uvm_mapping_t", struct address_space);
uvm_lite.c: g_uvmMpsServerCache = NV_KMEM_CACHE_CREATE("uvm_mps_server_t", struct UvmMpsServer_tag);
uvm_lite_prefetch.c: g_uvmLitePrefetchRegionAccessCache = NV_KMEM_CACHE_CREATE("UvmRegionAccess", UvmRegionAccess);
uvm_lite_region_tracking.c: g_uvmTrackingTreeCache = NV_KMEM_CACHE_CREATE("uvm_region_tracker_t", struct s_UvmRegionTracker);
uvm_lite_region_tracking.c: g_uvmTrackingTreeNodeCache = NV_KMEM_CACHE_CREATE("uvm_region_tracker_node_t", struct _tree_node);
uvm_page_cache.c: g_uvmPageTrackingCache = NV_KMEM_CACHE_CREATE("uvm_page_tracking_t", struct UvmPageTracking_tag);
Any chance you could tell me which ones need SLAB_USERCOPY as the third argument and which ones need a 0, or possibly provide a patch similar to https://grsecurity.net/~paxguy1/nvidia- ... -pax.patch? Also, the second part of that patch, to kernel/nvidia-uvm/uvm_common.c, has issues with the new NVIDIA driver. This looks like it can be fixed with something similar to this:
{
NV_STATUS status = NV_OK;
- g_exportedUvmOps.startDevice = uvm_gpu_event_start_device;
- g_exportedUvmOps.stopDevice = uvm_gpu_event_stop_device;
- if (uvmnext_activated())
- g_exportedUvmOps.isrTopHalf = uvmnext_isr_top_half;
- else if (uvmfull_activated())
- g_exportedUvmOps.isrTopHalf = uvmfull_isr_top_half;
- fi
+#ifdef NVIDIA_UVM_NEXT_ENABLED
+ static struct UvmOpsUvmEvents g_exportedUvmOps = {
+ .startDevice = uvm_gpu_event_start_device,
+ .stopDevice = uvm_gpu_event_stop_device,
+ .isrTopHalf = uvmnext_isr_top_half,
+ };
+#else
+ static struct UvmOpsUvmEvents g_exportedUvmOps = {
+ .startDevice = uvm_gpu_event_start_device,
+ .stopDevice = uvm_gpu_event_stop_device,
+ .isrTopHalf = uvmfull_isr_top_half,
+ };
+#endif //NVIDIA_UVM_NEXT_ENABLED
// call RM to exchange the function pointers.
status = nvUvmInterfaceRegisterUvmCallbacks(&g_exportedUvmOps);
Thanks for any help you can provide.
The latest version of the NVIDIA drivers (above) has modified the Unified Virtual Memory code to call NV_KMEM_CACHE_CREATE 41 times.
All the calls currently have 2 arguments. In a previous patch supplied by paxguy1, kernel/common/inc/nv-linux.h and kernel/nvidia/nv.c were modified for SLAB_USERCOPY. The 41 calls are below (formatted to fit one per line).
uvm8_range_group.c: g_uvm_range_group_cache = NV_KMEM_CACHE_CREATE("uvm_range_group_t", uvm_range_group_t);
uvm8_thread_context.c: g_uvm_thread_context_cache = NV_KMEM_CACHE_CREATE("uvm_thread_context_t", uvm_thread_context_t);
uvm8_va_block.c: g_uvm_va_block_cache = NV_KMEM_CACHE_CREATE("uvm_va_block_t", uvm_va_block_t);
uvm8_va_range.c: g_uvm_va_range_cache = NV_KMEM_CACHE_CREATE("uvm_va_range_t", uvm_va_range_t);
uvm_channel_mgmt.c: g_uvmChannelManagerCache = NV_KMEM_CACHE_CREATE("uvm_channel_manager_t", UvmChannelManager);
uvm_channel_mgmt.c: g_uvmRingbufferCache = NV_KMEM_CACHE_CREATE("uvm_ringbuffer_t", UvmRingbuffer);
uvm_channel_mgmt.c: g_uvmChannelCache = NV_KMEM_CACHE_CREATE("uvm_channel_t", UvmChannel);
uvm_channel_mgmt.c: g_uvmPushbufferCache = NV_KMEM_CACHE_CREATE("uvm_pushbuffer_t", UvmPushbuffer);
uvm_channel_mgmt.c: g_uvmTrackerCache = NV_KMEM_CACHE_CREATE("uvm_tracker_t", UvmTracker);
uvm_channel_mgmt.c: g_uvmTrackerItemCache = NV_KMEM_CACHE_CREATE("uvm_trackeritem_t", UvmTrackerItem);
uvm_full_ctx_mgmt.c: g_uvmClientChannelCache = NV_KMEM_CACHE_CREATE("uvm_channel_info", UvmClientChannel);
uvm_full_ctx_mgmt.c: g_uvmVaspaceCache = NV_KMEM_CACHE_CREATE("uvm_va_space", UvmVaspace);
uvm_full_ctx_mgmt.c: g_uvmRmMemoryCache = NV_KMEM_CACHE_CREATE("uvm_rm_memory", UvmRmMemory);
uvm_full_ctx_mgmt.c: g_uvmRmMemoryRegionCache = NV_KMEM_CACHE_CREATE("uvm_rm_memory_region", UvmRmMemoryRegion);
uvm_full_ctx_mgmt.c: g_uvmEngCtxCache = NV_KMEM_CACHE_CREATE("uvm_eng_ctx", UvmEngCtx);
uvm_full_ctx_mgmt.c: g_uvmMappingCache = NV_KMEM_CACHE_CREATE("uvm_mapping", struct address_space);
uvm_full_device_mgmt.c: g_uvmPendingFaultCache = NV_KMEM_CACHE_CREATE("uvm_gpu_pending_faults", UvmPendingFault);
uvm_full_fault_handler.c: g_uvmFaultCancelEntryCache = NV_KMEM_CACHE_CREATE("uvm_fault_cancel_info", UvmGpuCancelFaultInfo);
uvm_full_identity_map.c: g_uvmMapListNodeCache = NV_KMEM_CACHE_CREATE("uvm_mapListNode", mapList_node);
uvm_full_pa_mgmt.c: g_uvmPaManagerCache = NV_KMEM_CACHE_CREATE("uvm_pa_manager", UvmPaManager);
uvm_full_pa_mgmt.c: g_UvmPaDescCache = NV_KMEM_CACHE_CREATE("uvm_pa_desc", UvmPaDesc);
uvm_full_pa_mgmt.c: g_uvmPaToVaCache = NV_KMEM_CACHE_CREATE("uvm_pa_to_va", UvmPaToVa);
uvm_full_pagetbl_mgmt.c: g_uvm2MVaDescCache = NV_KMEM_CACHE_CREATE("uvm_2mvadesc", Uvm2MbVaDesc);
uvm_full_pagetbl_mgmt.c: g_uvmGpuPdeCache = NV_KMEM_CACHE_CREATE("uvm_gpupde", UvmGpuPde);
uvm_full_pagetbl_mgmt.c: g_uvmPbOpsCache = NV_KMEM_CACHE_CREATE("uvm_pbops", UvmPbOps);
uvm_full_va_trie.c: g_uvmVatL0Cache = NV_KMEM_CACHE_CREATE("uvm_vatL0Cache", UvmVatL0);
uvm_full_va_trie.c: g_uvmVatL1Cache = NV_KMEM_CACHE_CREATE("uvm_vatL1Cache", UvmVatL1);
uvm_full_va_trie.c: g_uvmVatL2Cache = NV_KMEM_CACHE_CREATE("uvm_vatL2Cache", UvmVatL2);
uvm_full_va_trie.c: g_uvmVatGpuHeaderCache = NV_KMEM_CACHE_CREATE("uvm_vatGpuHeaderCache", UvmVatLevelGpuHeader);
uvm_kernel_counters.c: g_UvmCounterContainerCache = NV_KMEM_CACHE_CREATE("uvm_counter_container_t", struct UvmCounterContainer_tag);
uvm_kernel_events.c: g_uvmEventContainerCache = NV_KMEM_CACHE_CREATE("uvm_event_container_t", struct UvmEventContainer_tag);
uvm_lite.c: g_uvmPrivateCache = NV_KMEM_CACHE_CREATE("uvm_private_t", struct DriverPrivate_tag);
uvm_lite.c: g_uvmCommitRecordCache = NV_KMEM_CACHE_CREATE("uvm_commit_record_t", struct UvmCommitRecord_tag);
uvm_lite.c: g_uvmMigTrackerCache = NV_KMEM_CACHE_CREATE("uvm_mig_tracker_t", struct UvmGpuMigrationTracking_tag);
uvm_lite.c: g_uvmStreamRecordCache = NV_KMEM_CACHE_CREATE("uvm_stream_record_t", struct UvmStreamRecord_tag);
uvm_lite.c: g_uvmMappingCache = NV_KMEM_CACHE_CREATE("uvm_mapping_t", struct address_space);
uvm_lite.c: g_uvmMpsServerCache = NV_KMEM_CACHE_CREATE("uvm_mps_server_t", struct UvmMpsServer_tag);
uvm_lite_prefetch.c: g_uvmLitePrefetchRegionAccessCache = NV_KMEM_CACHE_CREATE("UvmRegionAccess", UvmRegionAccess);
uvm_lite_region_tracking.c: g_uvmTrackingTreeCache = NV_KMEM_CACHE_CREATE("uvm_region_tracker_t", struct s_UvmRegionTracker);
uvm_lite_region_tracking.c: g_uvmTrackingTreeNodeCache = NV_KMEM_CACHE_CREATE("uvm_region_tracker_node_t", struct _tree_node);
uvm_page_cache.c: g_uvmPageTrackingCache = NV_KMEM_CACHE_CREATE("uvm_page_tracking_t", struct UvmPageTracking_tag);
Any chance you could tell me which ones need SLAB_USERCOPY as the third argument and which ones need a 0, or possibly provide a patch similar to https://grsecurity.net/~paxguy1/nvidia- ... -pax.patch? Also, the second part of that patch, to kernel/nvidia-uvm/uvm_common.c, has issues with the new NVIDIA driver. This looks like it can be fixed with something similar to this:
{
NV_STATUS status = NV_OK;
- g_exportedUvmOps.startDevice = uvm_gpu_event_start_device;
- g_exportedUvmOps.stopDevice = uvm_gpu_event_stop_device;
- if (uvmnext_activated())
- g_exportedUvmOps.isrTopHalf = uvmnext_isr_top_half;
- else if (uvmfull_activated())
- g_exportedUvmOps.isrTopHalf = uvmfull_isr_top_half;
- fi
+#ifdef NVIDIA_UVM_NEXT_ENABLED
+ static struct UvmOpsUvmEvents g_exportedUvmOps = {
+ .startDevice = uvm_gpu_event_start_device,
+ .stopDevice = uvm_gpu_event_stop_device,
+ .isrTopHalf = uvmnext_isr_top_half,
+ };
+#else
+ static struct UvmOpsUvmEvents g_exportedUvmOps = {
+ .startDevice = uvm_gpu_event_start_device,
+ .stopDevice = uvm_gpu_event_stop_device,
+ .isrTopHalf = uvmfull_isr_top_half,
+ };
+#endif //NVIDIA_UVM_NEXT_ENABLED
// call RM to exchange the function pointers.
status = nvUvmInterfaceRegisterUvmCallbacks(&g_exportedUvmOps);
Thanks for any help you can provide.