path: root/tensorflow/contrib/sparsemax
author    Yifei Feng <yifeif@google.com>    2018-01-25 12:02:36 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>    2018-01-25 12:07:22 -0800
commit    351c0a533a111636333b4ebeede16485cf679ca9 (patch)
tree      a0786bc9a8fe7432d69d8095b10586e3ef515b93 /tensorflow/contrib/sparsemax
parent    a8c4e8d96de7c0978851a5f9718bbd6b8056d862 (diff)
Add C0330 bad-continuation check to pylint.
PiperOrigin-RevId: 183270896
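
For context, pylint's C0330 ("bad-continuation") message flags wrapped lines whose indentation matches neither the opening bracket nor a standard hanging indent; the hunks below rewrite the tests into the hanging-indent form. A minimal illustrative sketch follows (the helper some_function and its arguments are hypothetical, not taken from this commit):

def some_function(first_argument, second_argument, third_argument):
    """Hypothetical helper used only to illustrate continuation indentation."""
    return first_argument + second_argument + third_argument


# Continuation style the bad-continuation checker tends to flag: the wrapped
# line is aligned with neither the opening parenthesis nor a hanging indent.
flagged = some_function(1, 2,
      3)

# Continuation style this change converts the tests to: break immediately
# after the opening parenthesis and indent the wrapped arguments uniformly.
accepted = some_function(
    1, 2, 3)

print(flagged, accepted)
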
Diffstat (limited to 'tensorflow/contrib/sparsemax')
-rw-r--r--  tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_loss_test.py  34
-rw-r--r--  tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_test.py  48
2 files changed, 34 insertions, 48 deletions
diff --git a/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_loss_test.py b/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_loss_test.py
index c8b4e472c9..360e7dbe75 100644
--- a/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_loss_test.py
+++ b/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_loss_test.py
@@ -105,8 +105,8 @@ class SparsemaxLossTest(test.TestCase):
tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
np_loss = self._np_sparsemax_loss(z, q).astype(dtype)
- self.assertAllCloseAccordingToType(np_loss, tf_loss_out,
- half_atol=1e-2, half_rtol=5e-3)
+ self.assertAllCloseAccordingToType(
+ np_loss, tf_loss_out, half_atol=1e-2, half_rtol=5e-3)
self.assertShapeEqual(np_loss, tf_loss_op)
def _test_constant_add(self, dtype, random, use_gpu):
@@ -116,17 +116,17 @@ class SparsemaxLossTest(test.TestCase):
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1
- _, tf_loss_zpc = self._tf_sparsemax_loss(
- z + c, q, dtype, use_gpu
- )
+ _, tf_loss_zpc = self._tf_sparsemax_loss(z + c, q, dtype, use_gpu)
- _, tf_loss_z = self._tf_sparsemax_loss(
- z, q, dtype, use_gpu
- )
+ _, tf_loss_z = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
- self.assertAllCloseAccordingToType(tf_loss_zpc, tf_loss_z,
- float_atol=5e-6, float_rtol=5e-6,
- half_atol=1e-2, half_rtol=1e-2)
+ self.assertAllCloseAccordingToType(
+ tf_loss_zpc,
+ tf_loss_z,
+ float_atol=5e-6,
+ float_rtol=5e-6,
+ half_atol=1e-2,
+ half_rtol=1e-2)
def _test_sparsemax_loss_positive(self, dtype, random, use_gpu):
"""check sparsemax-loss proposition 4"""
@@ -170,10 +170,7 @@ class SparsemaxLossTest(test.TestCase):
with self.test_session(use_gpu=use_gpu):
err = gradient_checker.compute_gradient_error(
- logits, z.shape,
- loss_op, (test_obs, ),
- x_init_value=z, delta=1e-9
- )
+ logits, z.shape, loss_op, (test_obs,), x_init_value=z, delta=1e-9)
self.assertLess(err, 1e-4)
@@ -192,8 +189,8 @@ class SparsemaxLossTest(test.TestCase):
tf_grad = loss_grad_op.eval()
np_grad = self._np_sparsemax_loss_grad(z, q).astype(dtype)
- self.assertAllCloseAccordingToType(np_grad, tf_grad,
- half_atol=1e-2, half_rtol=5e-3)
+ self.assertAllCloseAccordingToType(
+ np_grad, tf_grad, half_atol=1e-2, half_rtol=5e-3)
self.assertShapeEqual(np_grad, loss_grad_op)
def _test_dtype(self, dtype):
@@ -220,5 +217,6 @@ class SparsemaxLossTest(test.TestCase):
def testDouble(self):
self._test_dtype('float64')
-if __name__ == "__main__":
+
+if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_test.py b/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_test.py
index 82d36ee9cb..259e62bd86 100644
--- a/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_test.py
+++ b/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_test.py
@@ -83,8 +83,8 @@ class SparsemaxTest(test.TestCase):
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
p_sparemax = self._np_sparsemax(z).astype(dtype)
- self.assertAllCloseAccordingToType(p_sparemax, tf_sparsemax_out,
- half_atol=5e-3)
+ self.assertAllCloseAccordingToType(
+ p_sparemax, tf_sparsemax_out, half_atol=5e-3)
self.assertShapeEqual(p_sparemax, tf_sparsemax_op)
def _test_sparsemax_of_zero(self, dtype, random, use_gpu):
@@ -111,9 +111,8 @@ class SparsemaxTest(test.TestCase):
p_expected = np.zeros((test_obs, 10), dtype=dtype)
p_expected[np.arange(0, test_obs), z_sort_arg[:, 0]] = 1
- tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(
- (1 / epsilon) * z, dtype, use_gpu
- )
+ tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax((1 / epsilon) * z,
+ dtype, use_gpu)
self.assertAllCloseAccordingToType(p_expected, tf_sparsemax_out)
self.assertShapeEqual(p_expected, tf_sparsemax_op)
@@ -123,16 +122,12 @@ class SparsemaxTest(test.TestCase):
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
c = random.uniform(low=-3, high=3, size=(test_obs, 1)).astype(dtype)
- _, tf_sparsemax_zpc = self._tf_sparsemax(
- z + c, dtype, use_gpu
- )
+ _, tf_sparsemax_zpc = self._tf_sparsemax(z + c, dtype, use_gpu)
- _, tf_sparsemax_z = self._tf_sparsemax(
- z, dtype, use_gpu
- )
+ _, tf_sparsemax_z = self._tf_sparsemax(z, dtype, use_gpu)
- self.assertAllCloseAccordingToType(tf_sparsemax_zpc, tf_sparsemax_z,
- half_atol=5e-3)
+ self.assertAllCloseAccordingToType(
+ tf_sparsemax_zpc, tf_sparsemax_z, half_atol=5e-3)
def _test_permutation(self, dtype, random, use_gpu):
"""check sparsemax proposition 3"""
@@ -143,12 +138,11 @@ class SparsemaxTest(test.TestCase):
per = random.permutation(10)
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(
- z[i, per].reshape(1, -1), dtype, use_gpu
- )
+ z[i, per].reshape(1, -1), dtype, use_gpu)
p_expected = p[i, per].reshape(1, -1)
- self.assertAllCloseAccordingToType(p_expected, tf_sparsemax_out,
- half_atol=5e-3)
+ self.assertAllCloseAccordingToType(
+ p_expected, tf_sparsemax_out, half_atol=5e-3)
self.assertShapeEqual(p_expected, tf_sparsemax_op)
def _test_diffrence(self, dtype, random, use_gpu):
@@ -166,18 +160,14 @@ class SparsemaxTest(test.TestCase):
continue
self.assertTrue(
- 0 <= p[val, j] - p[val, i] <= z[val, j] - z[val, i] + etol,
- "0 <= %.10f <= %.10f" % (
- p[val, j] - p[val, i], z[val, j] - z[val, i] + etol
- )
- )
+ 0 <= p[val, j] - p[val, i] <= z[val, j] - z[val, i] + etol,
+ '0 <= %.10f <= %.10f' % (p[val, j] - p[val, i],
+ z[val, j] - z[val, i] + etol))
def _test_two_dimentional(self, dtype, random, use_gpu):
"""check two dimentation sparsemax case"""
t = np.linspace(-2, 2, test_obs, dtype=dtype)
- z = np.vstack([
- t, np.zeros(test_obs, dtype=dtype)
- ]).T
+ z = np.vstack([t, np.zeros(test_obs, dtype=dtype)]).T
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
@@ -196,10 +186,7 @@ class SparsemaxTest(test.TestCase):
with self.test_session(use_gpu=use_gpu):
err = gradient_checker.compute_gradient_error(
- logits, z.shape,
- sparsemax_op, z.shape,
- x_init_value=z, delta=1e-9
- )
+ logits, z.shape, sparsemax_op, z.shape, x_init_value=z, delta=1e-9)
self.assertLess(err, 1e-4)
@@ -248,5 +235,6 @@ class SparsemaxTest(test.TestCase):
def testDouble(self):
self._test_dtype('float64')
-if __name__ == "__main__":
+
+if __name__ == '__main__':
test.main()
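
For reference, a hedged sketch of exercising the check named in the commit message locally through pylint's Python entry point (pylint.lint.Run). This assumes an older pylint release that still ships the bad-continuation checker (it was removed in pylint 2.6+), and the path is simply one of the files touched by this change, relative to a TensorFlow checkout:

# Minimal sketch: run only the bad-continuation check against one of the
# files touched by this commit. Run() exits with pylint's status code.
from pylint.lint import Run

Run([
    "--disable=all",
    "--enable=C0330",
    "tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_test.py",
])
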