Test TensorFlow 2

master
Gerber, Mike 4 years ago
parent cfeafeb57f
commit 5fa9406fbd

Dockerfile
@@ -1,11 +0,0 @@
FROM nvidia/cuda:10.0-cudnn7-runtime-ubuntu18.04
RUN apt-get update &&\
    apt-get install -y python3 python3-pip &&\
    apt-get clean && rm -rf /var/lib/apt/lists/*
COPY requirements.txt /tmp
RUN pip3 install --no-cache-dir --upgrade pip && \
pip3 install --no-cache-dir -r /tmp/requirements.txt
COPY test-nvidia /usr/bin
CMD ["/usr/bin/test-nvidia"]

@@ -0,0 +1,7 @@
Test Nvidia environment in relation to TensorFlow
=================================================
* `./run` tests the native system. One of tf1 or tf2 is expected to report no
  available GPU due to CUDA library incompatibility (see the example session
  below).
* `./run-docker` tests Docker support. Both TensorFlow versions should work,
  as we use a base image compatible with the respective version.
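
For example, on a host whose natively installed CUDA libraries match TF2, a
`./run` session might look roughly like the following. Which of the two fails
depends on the machine, and the GPU name and TensorFlow versions shown are
only placeholders, not something this repo pins (warnings elided):

    $ ./run
    == tf1
    GPU 0: <GPU name> (UUID: GPU-...)
    TensorFlow 1.15.x
    GPU available: False
    == tf2
    GPU 0: <GPU name> (UUID: GPU-...)
    TensorFlow 2.x
    GPU available: True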

assets/Dockerfile
@@ -0,0 +1,19 @@
ARG BASE_IMAGE
FROM $BASE_IMAGE
ARG tf
RUN apt-get update && \
    apt-get install -y python3 python3-distutils curl &&\
    apt-get clean && rm -rf /var/lib/apt/lists/* && \
    \
    curl -sSL https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \
    python3 get-pip.py && \
    rm -f get-pip.py
COPY assets/requirements-$tf.txt /tmp
RUN pip install --no-cache-dir -r /tmp/requirements-$tf.txt
COPY test-nvidia /usr/bin
CMD ["/usr/bin/test-nvidia"]

assets/requirements-tf2.txt
@@ -0,0 +1 @@
tensorflow == 2.*
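
The Dockerfile above and the run scripts below reference
assets/requirements-$tf.txt for both tf1 and tf2, so a sibling
assets/requirements-tf1.txt is expected as well. Its content is not visible
here; presumably it pins the 1.x line in the same style, e.g. via the
separate tensorflow-gpu package that carried GPU support in TF 1.x:

    tensorflow-gpu == 1.*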

run

@@ -1,12 +1,16 @@
 #!/bin/sh
-vdir=`mktemp -d /tmp/test-nvidia.XXXXXX`
-virtualenv -p /usr/bin/python3 $vdir
-. $vdir/bin/activate
-pip3 install -r requirements.txt
-python3 test-nvidia
-deactivate
-rm --preserve-root -rf $vdir
+for tf in tf1 tf2; do
+    echo "== $tf"
+    vdir=`mktemp -d /tmp/test-nvidia.XXXXXX`
+    # Need Python 3.7 here as TF1 does not support 3.8
+    virtualenv -q -p /usr/bin/python3.7 $vdir >/dev/null
+    . $vdir/bin/activate
+    pip install -q -r assets/requirements-$tf.txt
+    python3 test-nvidia
+    deactivate
+    rm --preserve-root -rf $vdir
+done
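
The rewritten script assumes the host provides `virtualenv` and a Python 3.7
interpreter at /usr/bin/python3.7 (per the comment above, TF 1.x does not
support Python 3.8). On an Ubuntu 18.04 host this would typically mean
something like:

    sudo apt-get install python3.7 virtualenv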

run-docker
@@ -1,3 +1,14 @@
 #!/bin/sh
-docker build -t test-nvidia `dirname $0`
-docker run --gpus all -it --rm test-nvidia
+set -e
+
+for tf in tf1 tf2; do
+    echo "== $tf"
+    case "$tf" in
+        tf1) BASE_IMAGE=nvidia/cuda:10.0-cudnn7-runtime-ubuntu18.04;;
+        tf2) BASE_IMAGE=nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04;;
+    esac
+    work_dir=`dirname $0`
+    image_id=`docker build -q --build-arg tf=$tf --build-arg BASE_IMAGE=$BASE_IMAGE -f assets/Dockerfile $work_dir`
+    docker run --gpus all -it --rm $image_id
+    docker rmi $image_id >/dev/null || true
+done
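
To debug a single configuration, the equivalent image can also be built and
run by hand from the repository root with the same build arguments, e.g. for
tf2 (the image tag here is arbitrary):

    docker build --build-arg tf=tf2 \
        --build-arg BASE_IMAGE=nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 \
        -f assets/Dockerfile -t test-nvidia-tf2 .
    docker run --gpus all -it --rm test-nvidia-tf2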

test-nvidia
@@ -1,13 +1,21 @@
 #!/usr/bin/python3
 import os
-import tensorflow as tf
 
-os.system('nvidia-smi')
+os.system('nvidia-smi -L')
 
-hello = tf.constant('Hello, TensorFlow!')
-sess = tf.compat.v1.Session()
-print(sess.run(hello))
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'  # '1' means >= WARN
+import tensorflow as tf
+print('TensorFlow', tf.__version__)
 
-print('GPU available:', tf.test.is_gpu_available(cuda_only=True))
+with tf.compat.v1.Session() as sess:
+    hello = tf.constant('Hello, TensorFlow!')
+    result = sess.run(hello)
+    #print(result)
+
+if hasattr(tf.config, 'list_physical_devices'):
+    # TensorFlow 2
+    is_gpu_available = len(tf.config.list_physical_devices('GPU')) > 0
+    print('GPU available:', is_gpu_available)
+else:
+    print('GPU available:', tf.test.is_gpu_available(cuda_only=True))
