TensorFlow自定义训练模型的样例写法

标签:#tensorflow# 时间:2019/05/19 19:38:08 作者:小木

本篇博客主要讲述TensorFlow自定义训练的写法,以下给出基于TensorFlow的完整示例代码。


import numpy as np
import tensorflow as tf

from tqdm import trange

# Hyperparameters for this toy custom-training example.
seed = 1000          # NOTE(review): defined but never used below — consider tf.set_random_seed(seed)
data_size = 100      # number of synthetic samples to generate
feature_size = 10    # features per sample
label_size = 3       # number of classes (matches the model's output width)
EPOCHS = 10          # full passes over the dataset
batch_size = 32      # samples per training step


# define model
def create_model(hidden_units=(32, 128), num_classes=3):
    """Build a small feed-forward classifier.

    Args:
        hidden_units: widths of the ReLU hidden layers. The default (32, 128)
            reproduces the original architecture (whose first layer width was
            accidentally tied to ``batch_size``).
        num_classes: size of the output layer.

    Returns:
        An uncompiled ``tf.keras.Sequential`` model.

    Notes:
        - The original passed ``input_shape`` to non-first layers; Keras
          silently ignores it there, so it is dropped here.
        - The output uses ``softmax`` (not ``sigmoid``): the training loss is
          categorical cross-entropy, which requires the outputs to form a
          probability distribution over classes.
    """
    layers = [
        tf.keras.layers.Dense(units, activation=tf.keras.activations.relu)
        for units in hidden_units
    ]
    layers.append(
        tf.keras.layers.Dense(num_classes, activation=tf.keras.activations.softmax)
    )
    return tf.keras.Sequential(layers)


class MyModel:
    """Graph-mode (TF1) wrapper: builds the network and, in training mode,
    wires the loss and the gradient-update op into the default graph.

    Attributes (training mode only):
        loss: the per-batch loss tensor.
        train_op: the op that applies gradients and increments the global
            step. Callers MUST fetch this in ``sess.run`` for the model to
            actually train.

    NOTE(review): relies on the module-level names ``step`` and ``get_loss``
    existing at instantiation time — confirm callers define them first.
    """

    def __init__(self, train_feature, optimizer, train_label=None, is_train=True):
        # Build the Keras model and run the features through it once so the
        # forward-pass ops are created in the graph.
        model = create_model()
        prediction_out = model(train_feature)
        if is_train:
            self.loss = get_loss(train_label, prediction_out)
            # compute gradients of the loss w.r.t. all trainable variables
            gradients = optimizer.compute_gradients(self.loss)

            # BUG FIX: the original discarded the op returned by
            # apply_gradients, so the update was never run and the weights
            # never changed. Keep it so the session can fetch it.
            self.train_op = optimizer.apply_gradients(gradients, global_step=step)

            # compute_gradients + apply_gradients can be merged into a single
            # call: train_op = optimizer.minimize(loss)


# define loss function
def get_loss(real_labels, prediction_out):
    """Return the per-example categorical cross-entropy between the true
    labels and the model's predicted class probabilities."""
    cross_entropy = tf.keras.losses.categorical_crossentropy(
        real_labels, prediction_out
    )
    return cross_entropy


def get_optimizer():
    """Create the training optimizer: Adagrad with a fixed learning rate."""
    learning_rate = 0.1
    return tf.train.AdagradOptimizer(learning_rate=learning_rate)


# ---- prepare a synthetic dataset ----
features = tf.random.uniform((data_size, feature_size))
# BUG FIX: categorical_crossentropy expects one-hot (or probability) targets.
# The original fed raw uniform floats in [0, label_size) of shape (N, 1);
# here we draw integer class ids and one-hot encode them instead.
labels = tf.one_hot(
    tf.random.uniform((data_size,), minval=0, maxval=label_size, dtype=tf.int32),
    depth=label_size,
)

dataset = tf.data.Dataset.from_tensor_slices((features, labels)).batch(batch_size).repeat(EPOCHS)

# ---- graph-mode iterator and bookkeeping ----
iterator = dataset.make_initializable_iterator()
features_batch, labels_batch = iterator.get_next()
total_instances = data_size * EPOCHS  # kept for reference; not used below
# BUG FIX: the original tested `data_size / batch_size == 0` (true division,
# almost never zero), which over-counts by one step whenever batch_size
# divides data_size evenly. The intent is ceil(data_size / batch_size).
steps_per_epoch = (data_size + batch_size - 1) // batch_size

step = tf.train.get_or_create_global_step()

my_model = MyModel(features_batch, get_optimizer(), labels_batch)

# Fetch the training op together with the loss when the model exposes one;
# fetching only the loss would never update the weights. getattr keeps this
# script working even against a MyModel that predates train_op.
fetches = [my_model.loss]
train_op = getattr(my_model, "train_op", None)
if train_op is not None:
    fetches.append(train_op)

with tf.Session() as sess:
    sess.run(iterator.initializer)
    sess.run(tf.global_variables_initializer())

    # outer loop: epochs
    for epoch in range(EPOCHS):
        tqr = trange(steps_per_epoch, desc="%2d" % (epoch + 1), leave=False)

        batch_losses = np.zeros(steps_per_epoch)

        # inner loop: batches — each sess.run pulls one batch through the
        # iterator and (when available) applies one gradient update
        for batch_idx in tqr:
            batch_losses[batch_idx] = np.mean(sess.run(fetches)[0])

        # report the mean loss over the epoch
        print(np.mean(batch_losses))
欢迎大家关注DataLearner官方微信,接受最新的AI技术推送