⬆️ Update for TensorFlow 2.5+2.6
parent 5e20d251d6
commit 5d05953b5f

6 changed files with 77 additions and 19 deletions

README.md
@@ -25,12 +25,42 @@ GPU available: True

Results
=======
As of 2020-09, the only combinations that are working:
As of 2021-10, the only combinations that are working:

* TensorFlow 1.15.3 using CUDA Toolkit 10.0
* TensorFlow 2.3.0 using CUDA Toolkit 10.1
* (TensorFlow 2.3.0 using CUDA Toolkit 10.1)
* TensorFlow 2.4.3 using CUDA Toolkit 11.0
* TensorFlow 2.5.1 & TensorFlow 2.6.0 using CUDA Toolkit 11.1 & 11.2.1

This is only for pip-installable TensorFlow, not self-compiled nor Anaconda.
We also did not test other TensorFlow versions. Note that these are the
CUDA *Toolkit* versions, not the CUDA version the driver supports (reported
by `nvidia-smi`).
This is only for pip-installable TensorFlow from PyPI, not self-compiled nor
Anaconda. Note that these are the CUDA *Toolkit* versions, not the CUDA version
the driver supports (reported by `nvidia-smi`).

Full log `run-docker-compatibility-matrix`
------------------------------------------
~~~
tensorflow-gpu 1.15 nvidia/cuda:10.0-cudnn7-runtime-ubuntu18.04 True
tensorflow-gpu 1.15 nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 False
tensorflow-gpu 1.15 nvidia/cuda:10.2-cudnn7-runtime-ubuntu18.04 False
tensorflow-gpu 1.15 nvidia/cuda:11.0-cudnn8-runtime-ubuntu18.04 False
tensorflow-gpu 1.15 nvidia/cuda:11.1-cudnn8-runtime-ubuntu18.04 False
tensorflow-gpu 1.15 nvidia/cuda:11.2.1-cudnn8-runtime-ubuntu18.04 False
tensorflow 2.4.3 nvidia/cuda:10.0-cudnn7-runtime-ubuntu18.04 False
tensorflow 2.4.3 nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 False
tensorflow 2.4.3 nvidia/cuda:10.2-cudnn7-runtime-ubuntu18.04 False
tensorflow 2.4.3 nvidia/cuda:11.0-cudnn8-runtime-ubuntu18.04 True
tensorflow 2.4.3 nvidia/cuda:11.1-cudnn8-runtime-ubuntu18.04 False
tensorflow 2.4.3 nvidia/cuda:11.2.1-cudnn8-runtime-ubuntu18.04 False
tensorflow 2.5.1 nvidia/cuda:10.0-cudnn7-runtime-ubuntu18.04 False
tensorflow 2.5.1 nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 False
tensorflow 2.5.1 nvidia/cuda:10.2-cudnn7-runtime-ubuntu18.04 False
tensorflow 2.5.1 nvidia/cuda:11.0-cudnn8-runtime-ubuntu18.04 False
tensorflow 2.5.1 nvidia/cuda:11.1-cudnn8-runtime-ubuntu18.04 True
tensorflow 2.5.1 nvidia/cuda:11.2.1-cudnn8-runtime-ubuntu18.04 True
tensorflow 2.6.0 nvidia/cuda:10.0-cudnn7-runtime-ubuntu18.04 False
tensorflow 2.6.0 nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 False
tensorflow 2.6.0 nvidia/cuda:10.2-cudnn7-runtime-ubuntu18.04 False
tensorflow 2.6.0 nvidia/cuda:11.0-cudnn8-runtime-ubuntu18.04 False
tensorflow 2.6.0 nvidia/cuda:11.1-cudnn8-runtime-ubuntu18.04 True
tensorflow 2.6.0 nvidia/cuda:11.2.1-cudnn8-runtime-ubuntu18.04 True
~~~
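A note on the Toolkit-vs-driver distinction above: one way to see both numbers on a host is sketched below. This is not part of the repository, and it assumes a pip-installed TensorFlow new enough to provide `tf.sysconfig.get_build_info()` (a sufficiently recent TF 2.x).

~~~
# Driver-side CUDA version, i.e. the maximum the installed driver supports;
# this is what nvidia-smi reports in its header line.
nvidia-smi | head -n 3

# CUDA Toolkit version the pip-installed TensorFlow wheel was built against.
python3 -c "import tensorflow as tf; print(tf.sysconfig.get_build_info().get('cuda_version'))"
~~~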
assets/Dockerfile

@@ -2,7 +2,9 @@ ARG BASE_IMAGE

FROM $BASE_IMAGE

ARG tf
ARG tensorflow_package
ARG tensorflow_version
ARG test_nvidia_options

RUN apt-get update && \
    apt-get install -y python3 python3-distutils curl &&\
@@ -11,9 +13,9 @@ RUN apt-get update && \
    curl -sSL https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \
    python3 get-pip.py && \
    rm -f get-pip.py
COPY assets/requirements-$tf.txt /tmp
RUN pip install --no-cache-dir -r /tmp/requirements-$tf.txt
RUN pip install --no-cache-dir "$tensorflow_package == $tensorflow_version"

COPY test-nvidia /usr/bin

CMD ["/usr/bin/test-nvidia"]
ENV test_nvidia_options $test_nvidia_options
CMD /usr/bin/test-nvidia $test_nvidia_options
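With the requirements files gone, the concrete TensorFlow package and version now arrive as build args. As a sketch of one cell of the matrix built by hand (the base image, versions, and the `tf-cuda-test` tag are arbitrary examples, and the `.` build context assumes the repository root), this mirrors what run-docker-compatibility-matrix does:

~~~
# Build and run a single TensorFlow/CUDA combination manually.
docker build \
  --build-arg BASE_IMAGE=nvidia/cuda:11.2.1-cudnn8-runtime-ubuntu18.04 \
  --build-arg tensorflow_package=tensorflow \
  --build-arg tensorflow_version=2.6.0 \
  --build-arg test_nvidia_options="--quiet" \
  -f assets/Dockerfile -t tf-cuda-test .
docker run --gpus all --rm -e TF_CPP_MIN_LOG_LEVEL=2 tf-cuda-test
~~~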
assets/requirements-tf1.txt (deleted)

@@ -1 +0,0 @@
tensorflow-gpu == 1.15.*

assets/requirements-tf2.txt (deleted)

@@ -1 +0,0 @@
tensorflow == 2.*
run-docker-compatibility-matrix

@@ -1,17 +1,29 @@
#!/bin/sh
set -e

for tf in tf1 tf2; do
for tensorflow_version in 1.15 2.4.3 2.5.1 2.6.0; do
  #  Note: CUDA 11.0 only with CUDNN 8
  for BASE_IMAGE in \
    nvidia/cuda:10.0-cudnn7-runtime-ubuntu18.04 \
    nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 \
    nvidia/cuda:10.2-cudnn7-runtime-ubuntu18.04 \
    nvidia/cuda:11.0-cudnn8-runtime-ubuntu18.04 \
    nvidia/cuda:11.1-cudnn8-runtime-ubuntu18.04 \
    nvidia/cuda:11.2.1-cudnn8-runtime-ubuntu18.04 \
  ; do
    echo "== $tf $BASE_IMAGE"

    # According to the docs at https://www.tensorflow.org/install/gpu, we should
    # use different package names depending on the major version of TF.
    if echo $tensorflow_version | grep -q ^1; then
      tensorflow_package=tensorflow-gpu
    else
      tensorflow_package=tensorflow
    fi


    echo -n "$tensorflow_package $tensorflow_version $BASE_IMAGE "
    work_dir=`dirname $0`
    image_id=`docker build -q --build-arg tf=$tf --build-arg BASE_IMAGE=$BASE_IMAGE -f assets/Dockerfile $work_dir`
    image_id=`docker build -q --build-arg test_nvidia_options="--quiet" --build-arg tensorflow_package=$tensorflow_package --build-arg tensorflow_version=$tensorflow_version --build-arg BASE_IMAGE=$BASE_IMAGE -f assets/Dockerfile $work_dir`
    docker run --gpus all -it --rm -e TF_CPP_MIN_LOG_LEVEL=2 $image_id
    docker rmi $image_id >/dev/null || true
  done
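Because `test-nvidia --quiet` prints only the bare boolean, each line of the matrix output ends in `True` or `False`, which makes a run easy to post-process. A possible way to keep only the working combinations (not part of the repo):

~~~
# Save the matrix output and filter for combinations where the GPU was visible.
./run-docker-compatibility-matrix > matrix.log
grep ' True' matrix.log
~~~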
							
								
								
									
test-nvidia
@@ -1,14 +1,24 @@
#!/usr/bin/python3
import os
import argparse


os.system('nvidia-smi -L')
parser = argparse.ArgumentParser(description="Test if GPU is available in TensorFlow")
parser.add_argument('--quiet', dest='quiet', action='store_true')
parser.set_defaults(quiet=False)

args = parser.parse_args()


if not args.quiet:
    os.system('nvidia-smi -L')


if not 'TF_CPP_MIN_LOG_LEVEL' in os.environ:
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'  # '1' means >= WARN
import tensorflow as tf
print('TensorFlow', tf.__version__)
if not args.quiet:
    print('TensorFlow', tf.__version__)
with tf.compat.v1.Session() as sess:
    hello = tf.constant('Hello, TensorFlow!')
    result = sess.run(hello)
@@ -17,6 +27,12 @@ with tf.compat.v1.Session() as sess:
if hasattr(tf.config, 'list_physical_devices'):
    # TensorFlow 2
    is_gpu_available = len(tf.config.list_physical_devices('GPU')) > 0
    print('GPU available:', is_gpu_available)
    if not args.quiet:
        print('GPU available:', is_gpu_available)
    else:
        print(is_gpu_available)
else:
    print('GPU available:', tf.test.is_gpu_available(cuda_only=True))
    if not args.quiet:
        print('GPU available:', tf.test.is_gpu_available(cuda_only=True))
    else:
        print(tf.test.is_gpu_available(cuda_only=True))
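For reference, the two modes of the updated script, assuming TensorFlow is installed locally (the shown values are illustrative, not real output):

~~~
./test-nvidia            # nvidia-smi GPU list, TensorFlow version, then e.g. "GPU available: True"
./test-nvidia --quiet    # only the boolean, e.g. "True"
~~~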