从Tensorflow变量中提取值

cigdeys3  于 2023-03-19  发布在  其他
关注(0)|答案(3)|浏览(145)

我是Python和Tensorflow的新手,在训练阶段后,我在从NN获取值时遇到了一些困难。

import tensorflow as tf
import numpy as np
import input_data

# Load MNIST with one-hot encoded labels (TF1-style input pipeline).
mnist = input_data.read_data_sets("/tmp/data/", one_hot = True)

# Width of each of the two fully-connected hidden layers.
n_nodes_hl1 = 50
n_nodes_hl2 = 50

n_classes = 10    # one output unit per MNIST digit class
batch_size = 128  # mini-batch size used during training

# Graph inputs: flattened 28x28 images and their one-hot labels.
x = tf.placeholder('float',[None, 784])
y = tf.placeholder('float')

def neural_network_model(data):
    """Build a two-hidden-layer fully connected network over `data`.

    Args:
        data: float tensor of shape [None, 784] (flattened MNIST images).

    Returns:
        Unscaled logits tensor of shape [None, n_classes].
    """
    # Each parameter is given an explicit name so it can be retrieved later,
    # e.g. via get_tensor_by_name('weights1:0').
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1]), name='weights1'),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]), name='biases1')}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2]), name='weights2'),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]), name='biases2')}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_classes]), name='weights3'),
                    'biases': tf.Variable(tf.random_normal([n_classes]), name='biases3')}

    l1 = tf.nn.relu(tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases']))
    l2 = tf.nn.relu(tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases']))

    # No activation on the output: softmax_cross_entropy_with_logits_v2
    # expects raw logits.
    output = tf.add(tf.matmul(l2, output_layer['weights']), output_layer['biases'])

    # BUG FIX: the original `return` was indented one extra space,
    # which raises an IndentationError.
    return output

def train_neural_network(x):
    """Train the network on MNIST, printing per-epoch loss and test accuracy.

    Args:
        x: placeholder for the input images ([None, 784]).
    """
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    hm_epochs = 100
    init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                # BUG FIX: these two lines had inconsistent indentation in the
                # original post (IndentationError).
                ep_x, ep_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: ep_x, y: ep_y})
                epoch_loss += c
            print('Epoch', epoch+1, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        # Evaluate on the test set inside the same session so the trained
        # variable values are still live.
        correct = tf.equal(tf.argmax(prediction,1), tf.argmax(y,1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x:mnist.test.images, y: mnist.test.labels}))

train_neural_network(x)

我尝试使用以下方法从第1层提取权重:

# NOTE(review): calling tf.get_variable with an already-used name DEFINES new
# variables ('weights1_1', 'biases1_1') rather than referencing the trained
# ones — hence the "Attempting to use uninitialized value" error. Wrap the
# calls in tf.variable_scope('', reuse=True) to fetch the existing variables.
# (Stray indentation from the original paste removed — it was a SyntaxError.)
w = tf.get_variable('weights1',shape=[784,50])
b = tf.get_variable('biases1',shape=[50,])
myWeights, myBiases = sess.run([w,b])

但是这会抛出错误:Attempting to use uninitialized value weights1_1
这是因为我的变量在dict类型“hidden_1_layer”中吗?
我对Python和Tensorflow数据类型还不是很熟悉,所以我完全搞不懂!

u0njafvf

u0njafvf1#

使用以下代码:

# Look up the variables' output tensors by name in the default graph.
tensor_1 = tf.get_default_graph().get_tensor_by_name("weights1:0")
tensor_2 = tf.get_default_graph().get_tensor_by_name("biases1:0")  # BUG FIX: was misspelled 'tesnor_2'
# NOTE(review): these fetches must run in a session where the variables were
# initialized (e.g. the training session) — a brand-new Session holds
# uninitialized values and will raise the same error.
sess = tf.Session()
np_arrays = sess.run([tensor_1, tensor_2])

还有其他方法可以存储变量以供以后使用或分析。请指定提取权重和偏差的目的。如果需要进一步讨论,请进一步注解。

2g32fytz

2g32fytz2#

当你写下这段代码时:

# Without an enclosing reuse scope, these calls DEFINE new variables rather
# than fetching the existing 'weights1'/'biases1' (this reproduces the
# asker's mistake on purpose).
w = tf.get_variable('weights1',shape=[784,50])
b = tf.get_variable('biases1',shape=[50,])

您正在定义2个新变量:

  1. weights1变为weights1_1
  2. biases1变为biases1_1
    因为变量名 weights1 和 biases1 在图中已经存在,所以 tensorflow 为您添加了后缀 _<counter>,以避免命名冲突。
    如果你想创建一个已经存在的变量的引用,你必须熟悉variable scope的概念。
    简而言之,必须明确表示要重用某个变量,可以使用 tf.variable_scope 及其 reuse 参数来实现这一点。
scope_name =  "" # default (root) scope, where weights1/biases1 were created
# reuse=True makes get_variable return the EXISTING variables instead of
# creating suffixed copies (weights1_1, biases1_1).
with tf.variable_scope(scope_name, reuse=True):
    w = tf.get_variable('weights1',shape=[784,50])
    b = tf.get_variable('biases1',shape=[50,])
hyrbngr7

hyrbngr73#

要在训练过程中获取这些值,你也可以使用自定义回调方法!

class custom_callback(tf.keras.callbacks.Callback):
    """Keras callback that prints the model weights each epoch, writes the
    epoch's metrics to TensorBoard summaries, and stops training once the
    'accuracy' metric exceeds 0.90.

    BUG FIX: the original class body contained a bare
    ``tf.summary.create_file_writer(val_dir)`` statement, which executed at
    class-definition time (NameError if ``val_dir`` was not yet defined) and
    discarded the writer it created. It has been removed; the writer is
    created lazily in ``_val_writer`` instead.
    """

    def _val_writer(self):
        """Lazily create and cache a summary writer for validation metrics."""
        # BUG FIX: self._writers was never initialized in the original,
        # so the first call raised AttributeError.
        if not hasattr(self, '_writers'):
            self._writers = {}
        if 'val' not in self._writers:
            # assumes val_dir is a module-level log-directory path — TODO confirm
            self._writers['val'] = tf.summary.create_file_writer(val_dir)
        return self._writers['val']

    def on_epoch_end(self, epoch, logs=None):
        # BUG FIX: mutable default argument `logs={}` replaced with None.
        logs = logs or {}
        print('weights: ' + str(self.model.get_weights()))

        if self.model.optimizer and hasattr(self.model.optimizer, 'iterations'):
            with tf.summary.record_if(True):  # self._val_writer().as_default():
                # (dead `step = ''` assignment removed — it was never used)
                for name, value in logs.items():
                    tf.summary.scalar(
                        'evaluation_' + name + '_vs_iterations',
                        value,
                        step=self.model.optimizer.iterations.read_value(),
                    )

        # BUG FIX: logs['accuracy'] raised KeyError when the metric was absent,
        # and compared with `== None` instead of `is None`.
        accuracy = logs.get('accuracy')
        if accuracy is not None and accuracy > 0.90:
            self.model.stop_training = True

        with tf.compat.v1.variable_scope('Value', reuse=tf.compat.v1.AUTO_REUSE):
            w1 = tf.compat.v1.get_variable('w2', shape=[256])
            b1 = tf.compat.v1.get_variable('b2', shape=[256,])

            print('w1:' + str(w1))
            print('b1:' + str(b1))

custom_callback = custom_callback()

history = model_highscores.fit(batched_features, epochs=99, validation_data=(dataset.shuffle(len(list_image))), callbacks=[custom_callback])

相关问题