TensorFlow 1.x version (static graph: placeholders, Session, and feed_dict):
import tensorflow as tf
print(tf.__version__)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import shuffle
%matplotlib notebook
df = pd.read_csv("D:\\data\\boston.csv", header=0)
df = df.values  # .values already returns an ndarray
# Min-max normalize each of the 12 feature columns to [0, 1], so that
# features with very different value ranges do not destabilize the weight updates
for i in range(12):
    df[:, i] = (df[:, i] - df[:, i].min()) / (df[:, i].max() - df[:, i].min())
x_data = df[:, 0:12]  # feature columns
y_data = df[:, 12]    # label column (house price)
# Placeholders exist only in graph mode; when running under TF 2.x this
# TF1-style code needs eager execution disabled first
tf.compat.v1.disable_eager_execution()
x = tf.compat.v1.placeholder(tf.float32, [None, 12], name="X")
y = tf.compat.v1.placeholder(tf.float32, [None, 1], name="Y")
with tf.name_scope("Model"):
    w = tf.Variable(tf.random.normal([12, 1], stddev=0.01), name="w")
    b = tf.Variable(1.0, name="b")

    def model(x, w, b):
        return tf.matmul(x, w) + b

    pred = model(x, w, b)
with tf.name_scope("LossFunction"):
    loss_function = tf.reduce_mean(tf.pow(y - pred, 2))  # mean squared error
train_epochs = 50
learning_rate = 0.01
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)
sess = tf.compat.v1.Session()
init = tf.compat.v1.global_variables_initializer()
sess.run(init)
loss_list = []
log_dir = "d:/logs"
loss_average = 0.0
# No summary ops are defined, so merge_all() would just return None;
# the FileWriter is used only to write the graph for TensorBoard
writer = tf.compat.v1.summary.FileWriter(log_dir, sess.graph)
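# The dumped graph can be inspected with: tensorboard --logdir d:/logs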
for epoch in range(train_epochs):
    loss_sum = 0.0
    for xs, ys in zip(x_data, y_data):
        # Feed one sample at a time (stochastic gradient descent)
        xs = xs.reshape(1, 12)
        ys = ys.reshape(1, 1)
        _, loss = sess.run([optimizer, loss_function], feed_dict={x: xs, y: ys})
        loss_sum += loss
    # Reshuffle the samples so the next epoch sees them in a new order
    x_data, y_data = shuffle(x_data, y_data)
    loss_average = loss_sum / len(y_data)
    print("epoch:", epoch + 1, "loss:", loss_average)
    loss_list.append(loss_average)
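matplotlib is imported but never used in the listing above; presumably the collected loss_list was plotted afterwards. A minimal sketch of that step:
plt.figure()
plt.plot(loss_list)  # average per-epoch training loss
plt.xlabel("epoch")
plt.ylabel("loss")
plt.show()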
TensorFlow 2.x version (eager execution with tf.GradientTape):
import tensorflow as tf
print(tf.__version__)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import shuffle
from sklearn.preprocessing import scale
%matplotlib notebook
df = pd.read_csv("D:\\data\\boston.csv", header=0)
df = df.values  # .values already returns an ndarray
x_data = df[:, 0:12]  # feature columns
y_data = df[:, 12]    # label column (house price)
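# Hold-out split: 300 rows for training, 100 for validation, the rest for test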
train_num = 300
valid_num = 100
test_num = len(x_data) - train_num - valid_num
x_train = x_data[:train_num]
y_train = y_data[:train_num]
x_valid = x_data[train_num:train_num + valid_num]
y_valid = y_data[train_num:train_num + valid_num]
x_test = x_data[train_num + valid_num:]
y_test = y_data[train_num + valid_num:]
# z-score the features with sklearn's scale(); note that each split is
# scaled with its own statistics here
x_train = tf.cast(scale(x_train), dtype=tf.float32)
x_valid = tf.cast(scale(x_valid), dtype=tf.float32)
x_test = tf.cast(scale(x_test), dtype=tf.float32)
W = tf.Variable(tf.random.normal([12,1],mean=0.0,stddev=0.01),dtype=tf.float32)
B = tf.Variable(tf.zeros(1),dtype=tf.float32)
def model(x, w, b):
    return tf.matmul(x, w) + b
train_epochs = 50
learning_rate = 0.01
batch_size = 10
def loss(x, y, w, b):
    # Make y a float32 column vector so it matches pred's (N, 1) shape and
    # dtype; otherwise (N, 1) - (N,) would broadcast to an (N, N) matrix
    y = tf.reshape(tf.cast(y, tf.float32), [-1, 1])
    err = model(x, w, b) - y
    squared_err = tf.square(err)
    return tf.reduce_mean(squared_err)
def grad(x, y, w, b):
    with tf.GradientTape() as tape:
        loss_ = loss(x, y, w, b)
    return tape.gradient(loss_, [w, b])
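# tape.gradient returns the gradients in the same order as the variable
# list [w, b], ready to be paired with the variables for apply_gradients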
optimizer = tf.keras.optimizers.SGD(learning_rate)
loss_list_train = []
loss_list_valid = []
total_step = train_num // batch_size  # mini-batches per epoch
for epoch in range(train_epochs):
    for step in range(total_step):
        xs = x_train[step * batch_size:(step + 1) * batch_size, :]
        ys = y_train[step * batch_size:(step + 1) * batch_size]
        grads = grad(xs, ys, W, B)                     # gradients for this mini-batch
        optimizer.apply_gradients(zip(grads, [W, B]))  # SGD update
    # Track full-split losses once per epoch
    loss_train = loss(x_train, y_train, W, B).numpy()
    loss_valid = loss(x_valid, y_valid, W, B).numpy()
    loss_list_train.append(loss_train)
    loss_list_valid.append(loss_valid)
    print("epoch={:3d}, loss_train={:.4f}, loss_valid={:.4f}".format(epoch + 1, loss_train, loss_valid))