import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv2d(x, w):
    # x and w arrive here as TensorFlow tensors, but my_conv_function
    # (my own numpy-based convolution) needs numpy.ndarray inputs.
    # How to do this conversion is exactly what I ask about below.
    x_array = ...  # somehow turn the tensor x into a numpy.ndarray
    w_array = ...  # somehow turn the tensor w into a numpy.ndarray
    return my_conv_function(x_array, w_array, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2_2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])
x_image = tf.reshape(x, [-1, 28, 28, 1])

w_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
h_pool1 = max_pool_2_2(h_conv1)

w_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
h_pool2 = max_pool_2_2(h_conv2)

w_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)

keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

w_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])

y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2) + b_fc2)

cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

sess = tf.Session()
with sess.as_default():
    sess.run(tf.initialize_all_variables())
    for i in range(10000):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:
            train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
            print("step %d, train_accuracy %g" % (i, train_accuracy))
        # Train on every batch, not only every 100th iteration.
        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
    print("test accuracy %g" % accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))

In my code, I implement a function called my_conv_function() to replace tf.nn.conv2d. My function needs numpy.ndarray parameters, but x and w are TensorFlow tensors. How can I convert them to numpy.ndarray?
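To make the mismatch concrete, inside the graph everything is still symbolic:

import numpy as np

print(isinstance(x_image, tf.Tensor))   # True: a symbolic graph node
print(isinstance(x_image, np.ndarray))  # False: no concrete values yet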

  • Did you try array = yourTensor.eval(session=yourSession) ? Commented Apr 28, 2017 at 12:35
  • def conv2d(x, w): print(w.eval(session=sess)); result = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME'); return result. Could this work? Commented Apr 28, 2017 at 12:58

1 Answer


sess.run(yourTensor) or yourTensor.eval() should return the numpy array you need. I might be wrong, but I was under the impression that doing this too often slows things down, since essentially you have to run the graph every time.
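For example, with the session from the question and the variables already initialized (w_conv1, h_conv1, x, and mnist are names from the question's code):

# A variable's current value can be fetched directly:
w_np = sess.run(w_conv1)            # numpy.ndarray, shape (5, 5, 1, 32)
w_np = w_conv1.eval(session=sess)   # equivalent

# A tensor that depends on a placeholder needs a feed_dict:
batch = mnist.train.next_batch(50)
h_np = sess.run(h_conv1, feed_dict={x: batch[0]})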


3 Comments

Do you have any advice on implementing my idea? I want to use my own function, written in Python, to replace the TensorFlow function, but I also need TensorFlow functions to process my function's result. How can I make them work well together? I am new to TensorFlow.
The proper way is to write an extension for TensorFlow that actually uses tensors. If you have to evaluate tensors dynamically, it will get slow eventually.
Thanks for your advice! Maybe I should do this.
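(A middle ground worth noting: TensorFlow 1.x has tf.py_func, which wraps a numpy-based Python function as a graph op, so TensorFlow itself converts the tensors to numpy.ndarray before each call. A minimal sketch, assuming my_conv_function accepts and returns float32 arrays; note that tf.py_func has no registered gradient, so the AdamOptimizer step could not backpropagate through it without extra work:)

def conv2d_py(x, w):
    # tf.py_func converts x and w to numpy.ndarray before calling the
    # wrapped function, and wraps its return value back into a tensor.
    out = tf.py_func(
        lambda x_np, w_np: my_conv_function(x_np, w_np,
                                            strides=[1, 1, 1, 1],
                                            padding='SAME'),
        [x, w], tf.float32)
    # py_func drops static shape info; restore it for the layers above.
    # 'SAME' padding with stride 1 keeps height and width; the channel
    # count comes from the filter's last dimension.
    out.set_shape(x.get_shape().as_list()[:3] + [w.get_shape().as_list()[3]])
    return out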
