import warnings
warnings.filterwarnings('ignore')
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import *
print(sys.version_info)
print(sys.version)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
# tf.function and AutoGraph
def scaled_elu(z, scale=1.0, alpha=1.0):
    # z >= 0 ? scale * z : scale * alpha * elu(z)
    is_positive = tf.greater_equal(z, 0.0)
    return scale * tf.where(is_positive, z, alpha * tf.nn.elu(z))

print(scaled_elu(tf.constant([-4., -2.])))
# Convert the Python function into a TensorFlow graph function
scaled_elu_tf = tf.function(scaled_elu)
print(scaled_elu_tf(tf.constant([-4., -3.])))
# Recover the original Python function from the tf.function
print(scaled_elu_tf.python_function is scaled_elu)
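# The same conversion is more commonly written with the @tf.function decorator.
# A minimal sketch (scaled_elu_decorated is an illustrative name, not part of
# the original code); it also prints the graph-mode source that AutoGraph
# generates from the plain Python function.
@tf.function
def scaled_elu_decorated(z, scale=1.0, alpha=1.0):
    is_positive = tf.greater_equal(z, 0.0)
    return scale * tf.where(is_positive, z, alpha * tf.nn.elu(z))

print(scaled_elu_decorated(tf.constant([-4., -3.])))
# Inspect the Python code AutoGraph produces for the eager function
print(tf.autograph.to_code(scaled_elu))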
# What does tf.function buy us? Speed.
%timeit scaled_elu(tf.random.normal((10000, 10000)))
%timeit scaled_elu_tf(tf.random.normal((10000, 10000)))
# 2.86 s ± 199 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
# 2.02 s ± 79.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
# These results were measured on CPU only; with GPU acceleration the speedup
# from tf.function is even more pronounced.
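# Note that the %timeit calls above also time tf.random.normal itself, which
# dilutes the comparison. A small sketch of a tighter measurement (same two
# functions, input created once outside the timed expression):
x = tf.random.normal((10000, 10000))
%timeit scaled_elu(x)
%timeit scaled_elu_tf(x)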