nvidia: update to 381.22.
parent 39d411c39c
commit 7166832647

3 changed files with 5 additions and 369 deletions
@@ -139,7 +139,7 @@ libnvidia-gtk2.so.346.47 nvidia-gtklibs-346.47_1 ignore
 libnvidia-gtk3.so.346.47 nvidia-gtklibs-346.47_1 ignore
 libnvidia-glcore.so.346.47 nvidia340-libs-340.46_1 ignore
 libnvidia-glsi.so.346.72 nvidia-libs-346.72_1 ignore
-libnvidia-fatbinaryloader.so.378.13 nvidia-libs-378.13_1 ignore
+libnvidia-fatbinaryloader.so.381.22 nvidia-libs-381.22_1 ignore
 libglapi.so.0 libglapi-7.11_1
 libgbm.so.1 libgbm-9.0_1
 librsvg-2.so.2 librsvg-2.26.0_1

@@ -1,359 +0,0 @@
--- kernel/common/inc/nv-mm.h
+++ kernel/common/inc/nv-mm.h
@@ -46,6 +46,8 @@
 * 2016 Dec 14:5b56d49fc31dbb0487e14ead790fc81ca9fb2c99
 */
 
+#include <linux/version.h>
+
 #if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
 #if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS)
 #define NV_GET_USER_PAGES get_user_pages
@@ -92,10 +94,13 @@
 pages, vmas, NULL);
 
 #else
-
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
 pages, vmas);
-
+#else
+ return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
+ pages, vmas, NULL);
+#endif
 #endif
 
 }
 
--- kernel/nvidia-drm/nvidia-drm-fence.c
+++ kernel/nvidia-drm/nvidia-drm-fence.c
@@ -31,7 +31,11 @@
 
 #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
 struct nv_fence {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 struct fence base;
+#else
+ struct dma_fence base;
+#endif
 spinlock_t lock;
 
 struct nvidia_drm_device *nv_dev;
@@ -51,7 +55,11 @@ nv_fence_ready_to_signal(struct nv_fence *nv_fence)
 
 static const char *nvidia_drm_gem_prime_fence_op_get_driver_name
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 struct fence *fence
+#else
+ struct dma_fence *fence
+#endif
 )
 {
 return "NVIDIA";
@@ -59,7 +67,11 @@ static const char *nvidia_drm_gem_prime_fence_op_get_driver_name
 
 static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 struct fence *fence
+#else
+ struct dma_fence *fence
+#endif
 )
 {
 return "nvidia.prime";
@@ -67,7 +79,11 @@ static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name
 
 static bool nvidia_drm_gem_prime_fence_op_signaled
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 struct fence *fence
+#else
+ struct dma_fence *fence
+#endif
 )
 {
 struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
@@ -99,7 +115,11 @@ unlock_struct_mutex:
 
 static bool nvidia_drm_gem_prime_fence_op_enable_signaling
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 struct fence *fence
+#else
+ struct dma_fence *fence
+#endif
 )
 {
 bool ret = true;
@@ -107,7 +127,11 @@ static bool nvidia_drm_gem_prime_fence_op_enable_signaling
 struct nvidia_drm_gem_object *nv_gem = nv_fence->nv_gem;
 struct nvidia_drm_device *nv_dev = nv_fence->nv_dev;
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 if (fence_is_signaled(fence))
+#else
+ if (dma_fence_is_signaled(fence))
+#endif
 {
 return false;
 }
@@ -136,7 +160,11 @@ static bool nvidia_drm_gem_prime_fence_op_enable_signaling
 }
 
 nv_gem->fenceContext.softFence = fence;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 fence_get(fence);
+#else
+ dma_fence_get(fence);
+#endif
 
 unlock_struct_mutex:
 mutex_unlock(&nv_dev->dev->struct_mutex);
@@ -146,7 +174,11 @@ unlock_struct_mutex:
 
 static void nvidia_drm_gem_prime_fence_op_release
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 struct fence *fence
+#else
+ struct dma_fence *fence
+#endif
 )
 {
 struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
@@ -155,7 +187,11 @@ static void nvidia_drm_gem_prime_fence_op_release
 
 static signed long nvidia_drm_gem_prime_fence_op_wait
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 struct fence *fence,
+#else
+ struct dma_fence *fence,
+#endif
 bool intr,
 signed long timeout
 )
@@ -170,12 +206,20 @@ static signed long nvidia_drm_gem_prime_fence_op_wait
 * that it should never get hit during normal operation, but not so long
 * that the system becomes unresponsive.
 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 return fence_default_wait(fence, intr,
+#else
+ return dma_fence_default_wait(fence, intr,
+#endif
 (timeout == MAX_SCHEDULE_TIMEOUT) ?
 msecs_to_jiffies(96) : timeout);
 }
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 static const struct fence_ops nvidia_drm_gem_prime_fence_ops = {
+#else
+static const struct dma_fence_ops nvidia_drm_gem_prime_fence_ops = {
+#endif
 .get_driver_name = nvidia_drm_gem_prime_fence_op_get_driver_name,
 .get_timeline_name = nvidia_drm_gem_prime_fence_op_get_timeline_name,
 .signaled = nvidia_drm_gem_prime_fence_op_signaled,
@@ -285,7 +329,11 @@ static void nvidia_drm_gem_prime_fence_signal
 bool force
 )
 {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 struct fence *fence = nv_gem->fenceContext.softFence;
+#else
+ struct dma_fence *fence = nv_gem->fenceContext.softFence;
+#endif
 
 WARN_ON(!mutex_is_locked(&nv_dev->dev->struct_mutex));
 
@@ -301,10 +349,18 @@ static void nvidia_drm_gem_prime_fence_signal
 
 if (force || nv_fence_ready_to_signal(nv_fence))
 {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 fence_signal(&nv_fence->base);
+#else
+ dma_fence_signal(&nv_fence->base);
+#endif
 
 nv_gem->fenceContext.softFence = NULL;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 fence_put(&nv_fence->base);
+#else
+ dma_fence_put(&nv_fence->base);
+#endif
 
 nvKms->disableChannelEvent(nv_dev->pDevice,
 nv_gem->fenceContext.cb);
@@ -320,7 +376,11 @@ static void nvidia_drm_gem_prime_fence_signal
 
 nv_fence = container_of(fence, struct nv_fence, base);
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 fence_signal(&nv_fence->base);
+#else
+ dma_fence_signal(&nv_fence->base);
+#endif
 }
 }
 
@@ -513,7 +573,11 @@ int nvidia_drm_gem_prime_fence_init
 * fence_context_alloc() cannot fail, so we do not need to check a return
 * value.
 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 nv_gem->fenceContext.context = fence_context_alloc(1);
+#else
+ nv_gem->fenceContext.context = dma_fence_context_alloc(1);
+#endif
 
 ret = nvidia_drm_gem_prime_fence_import_semaphore(
 nv_dev, nv_gem, p->index,
@@ -670,7 +734,11 @@ int nvidia_drm_gem_prime_fence_attach
 nv_fence->nv_gem = nv_gem;
 
 spin_lock_init(&nv_fence->lock);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
+#else
+ dma_fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
+#endif
 &nv_fence->lock, nv_gem->fenceContext.context,
 p->sem_thresh);
 
@@ -680,7 +748,11 @@ int nvidia_drm_gem_prime_fence_attach
 
 reservation_object_add_excl_fence(&nv_gem->fenceContext.resv,
 &nv_fence->base);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 fence_put(&nv_fence->base); /* Reservation object has reference */
+#else
+ dma_fence_put(&nv_fence->base);
+#endif
 
 ret = 0;
 
--- kernel/nvidia-drm/nvidia-drm-gem.h
+++ kernel/nvidia-drm/nvidia-drm-gem.h
@@ -29,6 +29,8 @@
 
 #include "nvidia-drm-priv.h"
 
+#include <linux/version.h>
+
 #include <drm/drmP.h>
 #include "nvkms-kapi.h"
 
@@ -98,7 +100,11 @@ struct nvidia_drm_gem_object
 /* Software signaling structures */
 struct NvKmsKapiChannelEvent *cb;
 struct nvidia_drm_gem_prime_soft_fence_event_args *cbArgs;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 struct fence *softFence; /* Fence for software signaling */
+#else
+ struct dma_fence *softFence;
+#endif
 } fenceContext;
 #endif
 };
 
--- kernel/nvidia-drm/nvidia-drm-priv.h
+++ kernel/nvidia-drm/nvidia-drm-priv.h
@@ -25,6 +25,8 @@
 
 #include "conftest.h" /* NV_DRM_AVAILABLE */
 
+#include <linux/version.h>
+
 #if defined(NV_DRM_AVAILABLE)
 
 #include <drm/drmP.h>
@@ -34,7 +36,12 @@
 #endif
 
 #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 #include <linux/fence.h>
+#else
+#include <linux/dma-fence.h>
+#endif
+
 #include <linux/reservation.h>
 #endif
 
--- kernel/nvidia/nv-pat.c
+++ kernel/nvidia/nv-pat.c
@@ -203,6 +203,7 @@ void nv_disable_pat_support(void)
 }
 
 #if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 static int
 nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
@@ -234,6 +235,34 @@ static struct notifier_block nv_hotcpu_nfb = {
 .notifier_call = nvidia_cpu_callback,
 .priority = 0
 };
+#else
+static int nvidia_cpu_online(unsigned int hcpu)
+{
+ unsigned int cpu = get_cpu();
+ if (cpu == hcpu)
+ nv_setup_pat_entries(NULL);
+ else
+ NV_SMP_CALL_FUNCTION(nv_setup_pat_entries, (void *)(long int)hcpu, 1);
+
+ put_cpu();
+
+ return 0;
+}
+
+static int nvidia_cpu_down_prep(unsigned int hcpu)
+{
+ unsigned int cpu = get_cpu();
+ if (cpu == hcpu)
+ nv_restore_pat_entries(NULL);
+ else
+ NV_SMP_CALL_FUNCTION(nv_restore_pat_entries, (void *)(long int)hcpu, 1);
+
+ put_cpu();
+
+ return 0;
+}
+#endif
+
 #endif
 
 int nv_init_pat_support(nvidia_stack_t *sp)
@@ -255,7 +284,14 @@ int nv_init_pat_support(nvidia_stack_t *sp)
 #if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
 if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
 {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 if (register_hotcpu_notifier(&nv_hotcpu_nfb) != 0)
+#else
+ if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "gpu/nvidia:online",
+ nvidia_cpu_online,
+ nvidia_cpu_down_prep) != 0)
+#endif
 {
 nv_disable_pat_support();
 nv_printf(NV_DBG_ERRORS,
@@ -280,7 +316,11 @@ void nv_teardown_pat_support(void)
 {
 nv_disable_pat_support();
 #if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 unregister_hotcpu_notifier(&nv_hotcpu_nfb);
+#else
+ cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
+#endif
 #endif
 }
 }

@@ -3,8 +3,8 @@
 _desc="NVIDIA drivers for linux (long-lived series)"
 
 pkgname=nvidia
-version=378.13
-revision=4
+version=381.22
+revision=1
 maintainer="Juan RP <xtraeme@voidlinux.eu>"
 license="Proprietary NVIDIA license"
 homepage="http://www.nvidia.com"
@@ -24,11 +24,11 @@ build_options_default="glvnd"
 if [ "$XBPS_TARGET_MACHINE" = "i686" ]; then
 _pkg="NVIDIA-Linux-x86-${version}"
 distfiles="http://uk.download.nvidia.com/XFree86/Linux-x86/${version}/${_pkg}.run"
-checksum=05e62a6098aac7373438ee381072253a861d56522f74948c2b714e20e69a46b1
+checksum=7b7dd6ee1c871dc5367fc207bba65077c3820a683decbfe6126fc70c0d1b9d08
 else
 _pkg="NVIDIA-Linux-x86_64-${version}-no-compat32"
 distfiles="http://uk.download.nvidia.com/XFree86/Linux-x86_64/${version}/${_pkg}.run"
-checksum=a97a2ab047759a0b2c4abab5601e6f027230d355615ee745e24e738ee21cf5da
+checksum=c2468130af124bfe748bdf2bc4c08952a81b35d2bdb87d1217717e6a576217e8
 fi
 
 subpackages="nvidia-gtklibs nvidia-dkms nvidia-opencl nvidia-libs"
@@ -39,11 +39,6 @@ do_extract() {
 ./${_pkg}.run --extract-only
 rm -f ${_pkg}.run
 }
-do_configure() {
-cd ${_pkg}
-# patches
-patch -p0 < ${FILESDIR}/kernel_4.10.patch
-}
 do_install() {
 cd ${_pkg}
 