# Inspect installed NVIDIA driver / CUDA / cuDNN / framework versions.
modinfo nvidia
cat /usr/local/cuda/version.txt
nvcc --version
# cuDNN version macros (cuDNN <= 7 defines CUDNN_MAJOR directly in cudnn.h).
grep -A 2 CUDNN_MAJOR /usr/local/cuda/include/cudnn.h
grep -A 2 CUDNN_MAJOR /usr/include/cudnn.h
# cuDNN >= 8 moved the version macros into cudnn_version.h, so grep that too.
grep -A 2 CUDNN_MAJOR /usr/local/cuda/include/cudnn_version.h
grep -A 2 CUDNN_MAJOR /usr/include/cudnn_version.h
nvidia-smi
# TensorFlow version - for Python 2
python -c 'import tensorflow as tf; print(tf.__version__)'
# - for Python 3
python3 -c 'import tensorflow as tf; print(tf.__version__)'
# PyTorch: the package is imported as "torch", and inline code needs `python -c`.
CUDA_DEVICE_ORDER=PCI_BUS_ID CUDA_VISIBLE_DEVICES=1 python -c 'import torch; print(torch.cuda.current_device())'
I ran the command `cat /usr/include/cudnn.h | grep CUDNN_MAJOR -A 2`,
but it prints nothing.
This is my cudnn.h:
/*
*/
/* cudnn : Neural Networks Library
*/
/*
 * NOTE(review): this is the cuDNN 8+ umbrella header. It defines no
 * CUDNN_MAJOR macro itself — presumably the version macros now live in
 * cudnn_version.h (included below) — which is why
 * `grep CUDNN_MAJOR /usr/include/cudnn.h` prints nothing.
 */
#if !defined(CUDNN_H_)
#define CUDNN_H_
#include <cuda_runtime.h>
#include <stdint.h>
/* Grep this header instead for CUDNN_MAJOR / CUDNN_MINOR / CUDNN_PATCHLEVEL. */
#include "cudnn_version.h"
/* cuDNN 8 split the library into per-domain infer/train headers. */
#include "cudnn_ops_infer.h"
#include "cudnn_ops_train.h"
#include "cudnn_adv_infer.h"
#include "cudnn_adv_train.h"
#include "cudnn_cnn_infer.h"
#include "cudnn_cnn_train.h"
#include "cudnn_backend.h"
/* C linkage wrapper for C++ consumers; no declarations appear in this excerpt. */
#if defined(__cplusplus)
extern "C" {
#endif
#if defined(__cplusplus)
}
#endif
#endif /* CUDNN_H_ */