Create placeholders automatically #7

Open · wants to merge 8 commits into base: master
README.md: 55 additions, 0 deletions

@@ -48,12 +48,51 @@ x.backward()
assert np.allclose((a.grad, b.grad), (3., 24.))
```

Or simply wrap an existing TensorFlow function:

```python
def tf_function(a, b):
    c = 3 * a + 4 * b * b

    return c

session = tf.compat.v1.Session()
f = tfpyth.wrap_torch_from_tensorflow(
    tf_function, ["a", "b"], session=session
)
# or simpler
f = tfpyth.wrap_torch_from_tensorflow(
    tf_function, session=session
)  # automatically creates placeholders for "a" and "b" inside
# or even simpler
f = tfpyth.wrap_torch_from_tensorflow(
    tf_function
)  # automatically creates placeholders for "a" and "b" as well as the session

a_ = th.tensor(1, dtype=th.float32, requires_grad=True)
b_ = th.tensor(3, dtype=th.float32, requires_grad=True)
x = f(a_, b_)

assert x == 39.0

x.backward()

assert np.allclose((a_.grad, b_.grad), (3.0, 24.0))
```

* see `tests` for more examples


## What it's got

### `torch_from_tensorflow`

Creates a differentiable PyTorch function that evaluates a TensorFlow output tensor given its input placeholders.
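
For example, mirroring the tests in this PR, you can expose a TF1 graph with explicit placeholders to PyTorch:

```python
import tensorflow as tf
import torch as th
import tfpyth

session = tf.compat.v1.Session()

a = tf.compat.v1.placeholder(tf.float32, name="a")
b = tf.compat.v1.placeholder(tf.float32, name="b")
c = 3 * a + 4 * b * b

# torch_from_tensorflow returns a torch.autograd.Function; call it via .apply.
f = tfpyth.torch_from_tensorflow(session, [a, b], c).apply

x = f(th.tensor(1.0, requires_grad=True), th.tensor(3.0, requires_grad=True))
assert x == 39.0  # 3 * 1 + 4 * 3 * 3
```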

### `wrap_torch_from_tensorflow`

Wraps a TensorFlow function in a PyTorch function, automatically creating the required placeholders.
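
Besides the forms shown above, the tests also pass explicit placeholder names and shapes (a sketch based on `tests/test_adapters.py`; `None` marks a variable batch dimension):

```python
import tensorflow as tf
import torch as th
import tfpyth

def resize(a, size=(128, 128)):
    return tf.image.resize(a, size=size)

session = tf.compat.v1.Session()
# Third argument: one shape per created placeholder.
f_pt = tfpyth.wrap_torch_from_tensorflow(resize, ["a"], [(None, 64, 64, 1)], session=session)

y = f_pt(th.ones((1, 64, 64, 1), dtype=th.float32))
assert y.shape == (1, 128, 128, 1)
```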

### `eager_tensorflow_from_torch`

Creates an eager TensorFlow function from a PyTorch function.
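
A minimal sketch, assuming (from the name and the symmetric PyTorch wrappers above) that it takes a PyTorch function and returns a callable on eager TensorFlow tensors; the exact signature is not shown in this diff:

```python
import tensorflow as tf
import tfpyth

def torch_function(a, b):
    # a and b arrive as PyTorch tensors; any differentiable torch code works here.
    return 3 * a + 4 * b * b

# Assumed usage: wrap the PyTorch function for eager TensorFlow.
tf_function = tfpyth.eager_tensorflow_from_torch(torch_function)
x = tf_function(tf.constant(1.0), tf.constant(3.0))  # expected value: 39.0
```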
Expand All @@ -62,6 +101,22 @@ Creates an eager Tensorflow function from a PyTorch function.

Creates a TensorFlow op/tensor from a PyTorch function.
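
A minimal sketch, assuming a `(func, inputs, output_dtype)` argument order; this is an assumption, not confirmed by this diff:

```python
import tensorflow as tf
import tfpyth

session = tf.compat.v1.Session()

def torch_function(a, b):
    return 3 * a + 4 * b * b

a = tf.compat.v1.placeholder(tf.float32, name="a")
b = tf.compat.v1.placeholder(tf.float32, name="b")

# Assumed usage: build a TF tensor that evaluates the PyTorch function.
c = tfpyth.tensorflow_from_torch(torch_function, [a, b], tf.float32)
assert session.run(c, {a: 1.0, b: 3.0}) == 39.0
```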


## Notes on session management


* when using `wrap_torch_from_tensorflow` without a `session` argument, a singleton session is created in the background and reused across all such calls to `wrap_torch_from_tensorflow`.
* you can access this session using:

```python
import tfpyth

session = tfpyth.SingleSession.get_session()
```



## Future work

- [ ] support JAX
tests/test_adapters.py: 151 additions, 0 deletions

@@ -55,3 +55,154 @@ def get_tf_function():
    x.backward()

    assert np.allclose((a_.grad, b_.grad), (3.0, 24.0))


class Test_tensorflow_in_pytorch:
    def test_single_output(self):
        session = tf.compat.v1.Session()

        def get_tf_function():
            a = tf.compat.v1.placeholder(tf.float32, name="a")
            b = tf.compat.v1.placeholder(tf.float32, name="b")
            c = 3 * a + 4 * b * b

            f = tfpyth.torch_from_tensorflow(session, [a, b], c).apply
            return f

        f = get_tf_function()
        a_ = th.tensor(1, dtype=th.float32, requires_grad=True)
        b_ = th.tensor(3, dtype=th.float32, requires_grad=True)
        x = f(a_, b_)

        assert x == 39.0

        x.backward()

        assert np.allclose((a_.grad, b_.grad), (3.0, 24.0))

    def test_multiple_outputs(self):
        session = tf.compat.v1.Session()

        def get_tf_function():
            a = tf.compat.v1.placeholder(tf.float32, name="a")
            b = tf.compat.v1.placeholder(tf.float32, name="b")
            c = 3 * a + 4 * b * b
            d = 6 * a + 8 * b ** 2

            f = tfpyth.torch_from_tensorflow(session, [a, b], [c, d])
            f1, f2 = [ff.apply for ff in f]
            return f1, f2

        f1, f2 = get_tf_function()

        def f(a, b):
            return f1(a, b), f2(a, b)

        a_ = th.tensor(1, dtype=th.float32, requires_grad=True)
        b_ = th.tensor(3, dtype=th.float32, requires_grad=True)
        x1, x2 = f(a_, b_)

        assert x1 == 39.0
        assert x2 == 78.0

        x1.backward()
        x2.backward()

        # gradients accumulate across the two backward passes: (3 + 6, 24 + 48)
        assert np.allclose((a_.grad, b_.grad), (9.0, 72.0))


class Test_wrap_torch_from_tensorflow:
    def test_image_operation(self):
        def tensorflow_function(a, size=(128, 128)):
            return tf.image.resize(a, size=size)

        from functools import partial

        session = tf.compat.v1.Session()
        tf_func = partial(tensorflow_function, size=(128, 128))
        f_pt = tfpyth.wrap_torch_from_tensorflow(tf_func, ["a"], [(None, 64, 64, 1)], session=session)
        x = th.ones((1, 64, 64, 1), dtype=th.float32)
        y = f_pt(x)
        assert y.shape == (1, 128, 128, 1)

    def test_no_gradient_operation(self):
        def tensorflow_function(a, size=(128, 128)):
            return tf.image.resize(a, size=size)

        from functools import partial

        session = tf.compat.v1.Session()
        tf_func = partial(tensorflow_function, size=(128, 128))
        f_pt = tfpyth.wrap_torch_from_tensorflow(tf_func, ["a"], [(None, 64, 64, 1)], session=session)
        x = th.ones((1, 64, 64, 1), dtype=th.float32, requires_grad=False)
        conv = th.nn.Conv2d(1, 1, 1)
        x = conv(tfpyth.th_2D_channels_last_to_first(x))
        x = tfpyth.th_2D_channels_first_to_last(x)
        y = f_pt(x)

        assert y.shape == (1, 128, 128, 1)
        assert y.sum().backward() is None  # backward() returns None; this just checks it runs
        assert conv.bias.grad is not None

    def test_tensorflow_in_pytorch(self):
        session = tf.compat.v1.Session()

        def get_tf_function(a, b):
            c = 3 * a + 4 * b * b

            return c

        f = tfpyth.wrap_torch_from_tensorflow(get_tf_function, ["a", "b"], None, session=session)
        a_ = th.tensor(1, dtype=th.float32, requires_grad=True)
        b_ = th.tensor(3, dtype=th.float32, requires_grad=True)
        x = f(a_, b_)

        assert x == 39.0

        x.backward()

        assert np.allclose((a_.grad, b_.grad), (3.0, 24.0))

    def test_multiple_outputs(self):
        session = tf.compat.v1.Session()

        def get_tf_function(a, b):
            c = 3 * a + 4 * b * b
            d = 6 * a + 8 * b ** 2

            return c, d

        f = tfpyth.wrap_torch_from_tensorflow(get_tf_function, ["a", "b"], None, session=session)
        a_ = th.tensor(1, dtype=th.float32, requires_grad=True)
        b_ = th.tensor(3, dtype=th.float32, requires_grad=True)
        x1, x2 = f(a_, b_)

        assert x1 == 39.0
        assert x2 == 78.0

        x1.backward()
        assert np.allclose((a_.grad, b_.grad), (3.0, 24.0))
        x2.backward()  # partial derivatives are additive
        assert np.allclose((a_.grad, b_.grad), (9.0, 72.0))

    def test_autodetect_varnames(self):
        def get_tf_function(a, b):
            c = 3 * a + 4 * b * b

            return c

        # No names, shapes, or session given: everything is created automatically.
        f = tfpyth.wrap_torch_from_tensorflow(get_tf_function)
        a_ = th.tensor(1, dtype=th.float32, requires_grad=True)
        b_ = th.tensor(3, dtype=th.float32, requires_grad=True)
        x = f(a_, b_)

        assert x == 39.0

        x.backward()

        assert np.allclose((a_.grad, b_.grad), (3.0, 24.0))