@shkhln
Created March 17, 2020 23:58
#define _GNU_SOURCE
#include <dlfcn.h>
#include <fcntl.h>
#include <string.h>
#include <stdarg.h>
#include <stdint.h>

// pkg install linux-c7-devtools
// /compat/linux/bin/cc --sysroot=/compat/linux -m64 -std=c99 -Wall -ldl -fPIC -shared -o dummy-uvm.so uvm_ioctl_override.c
// env LD_PRELOAD=$PWD/dummy-uvm.so <cmd>

#define NV_UVM_INITIALIZE    0x30000001
#define NV_UVM_DEINITIALIZE  0x30000002

#define NV_ERR_NOT_SUPPORTED 0x56

struct NvUvmInitParams
{
  uint64_t flags __attribute__((aligned(8)));
  uint32_t status;
};

int (*libc_ioctl)(int fd, unsigned long request, ...) = NULL;

// Intercept ioctl(2): answer the UVM initialize/deinitialize requests locally
// (reporting UVM as unsupported, so callers fall back to non-UVM code paths)
// and pass everything else through to the real libc ioctl.
int ioctl(int fd, unsigned long request, ...) {

  if (!libc_ioctl) {
    libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
  }

  va_list _args_;
  va_start(_args_, request);
  void* data = va_arg(_args_, void*);
  va_end(_args_);

  if (request == NV_UVM_INITIALIZE) {
    struct NvUvmInitParams* params = (struct NvUvmInitParams*)data;
    params->status = NV_ERR_NOT_SUPPORTED;
    return 0;
  }

  if (request == NV_UVM_DEINITIALIZE) {
    return 0;
  }

  return libc_ioctl(fd, request, data);
}

int (*libc_open)(const char* path, int flags, ...) = NULL;

// Intercept open(2): redirect /dev/nvidia-uvm (which does not exist on FreeBSD)
// to /dev/null so the open succeeds and the ioctl shim above takes over.
int open(const char* path, int flags, ...) {

  if (!libc_open) { libc_open = dlsym(RTLD_NEXT, "open"); }

  mode_t mode = 0;

  va_list _args_;
  va_start(_args_, flags);
  if (flags & O_CREAT) {
    mode = va_arg(_args_, int);
  }
  va_end(_args_);

  if (strcmp("/dev/nvidia-uvm", path) == 0) {
    return libc_open("/dev/null", flags, mode);
  }

  return libc_open(path, flags, mode);
}
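
As a quick sanity check that dummy-uvm.so is actually being picked up, a small test program along the following lines can be built with the same Linux toolchain and run under LD_PRELOAD. It is only a sketch and not part of the gist: the file name uvm_check.c is made up here, and the constants and NvUvmInitParams layout are copied from the shim above. With the shim loaded, the open should succeed (silently redirected to /dev/null) and the ioctl should return 0 with status set to NV_ERR_NOT_SUPPORTED; without it, the open should fail because FreeBSD has no /dev/nvidia-uvm node.

// uvm_check.c -- minimal sketch for verifying the preload shim, not part of the original gist
//
// /compat/linux/bin/cc --sysroot=/compat/linux -m64 -std=c99 -Wall -o uvm_check uvm_check.c
// env LD_PRELOAD=$PWD/dummy-uvm.so ./uvm_check
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define NV_UVM_INITIALIZE    0x30000001
#define NV_ERR_NOT_SUPPORTED 0x56

struct NvUvmInitParams
{
  uint64_t flags __attribute__((aligned(8)));
  uint32_t status;
};

int main(void) {

  // With the shim preloaded this open is redirected to /dev/null and succeeds.
  int fd = open("/dev/nvidia-uvm", O_RDWR);
  if (fd < 0) {
    perror("open /dev/nvidia-uvm");
    return 1;
  }

  // With the shim preloaded the ioctl never reaches the kernel: it returns 0
  // and fills in status = NV_ERR_NOT_SUPPORTED.
  struct NvUvmInitParams params = { 0 };
  int ret = ioctl(fd, NV_UVM_INITIALIZE, &params);

  printf("ioctl returned %d, status 0x%x (expect 0 and 0x%x with the shim loaded)\n",
         ret, (unsigned)params.status, (unsigned)NV_ERR_NOT_SUPPORTED);

  close(fd);
  return (ret == 0 && params.status == NV_ERR_NOT_SUPPORTED) ? 0 : 1;
}
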

verm commented Sep 2, 2022

This is fantastic, thank you. I was able to use miniconda-installer from ports to get conda, and got stable-diffusion plus all of PyTorch working perfectly using CUDA on FreeBSD 13.1.


shkhln commented Sep 3, 2022

@verm Great! Although it's probably a good idea to make a post on the FreeBSD Forums (or even that dumb /r/freebsd place) for more visibility.

@rajhlinux

(in reply to verm's comment above)

Can you please make a tutorial on exactly how you did this? I'm trying to use the CUDA toolkit on FreeBSD 13.1.

Thanks.


verm commented Nov 21, 2022

Okay, I'll write something up in the next day or two and post it to the lists and forums.


verm commented Jan 1, 2023
