Commit 2939f53e authored by Eugene Brevdo, committed by TensorFlower Gardener

Move SparseTensor into its own framework/sparse_tensor.py, and add get_shape().

Change: 137887950
Parent 0a170e45
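
The sketch below is an editorial illustration of the new layout, not part of the commit: `SparseTensor` and `SparseTensorValue` now live in `tensorflow.python.framework.sparse_tensor`, and `SparseTensor` gains a `get_shape()` method that returns a static `TensorShape` derived from the `shape` tensor. It assumes a TensorFlow build that includes this change.

```python
# Editorial sketch (not part of the commit): the new import path and the
# newly added get_shape(), assuming a build that includes this change.
from tensorflow.python.framework import sparse_tensor

st = sparse_tensor.SparseTensor(
    indices=[[0, 0], [1, 2]],  # positions of the nonzero entries
    values=[1, 2],             # values at those positions
    shape=[3, 4])              # dense shape of the represented tensor

# New in this change: a static TensorShape inferred from the `shape` tensor.
print(st.get_shape())  # (3, 4)

# `shape` itself is unchanged: a 1-D int64 Tensor of length ndims.
print(st.shape)
```
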
......@@ -655,7 +655,7 @@ class WALSModel(object):
update_op: An op that assigns the newly computed values to the row/column
factors.
"""
assert isinstance(sp_input, ops.SparseTensor)
assert isinstance(sp_input, tf.SparseTensor)
if update_row_factors:
left = self._row_factors
......
......@@ -20,6 +20,7 @@ from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
......@@ -283,7 +284,7 @@ def is_tensor(x):
Returns:
`True` if `x` is a tensor, `False` if not.
"""
tensor_types = (ops.Tensor, ops.SparseTensor, variables.Variable)
tensor_types = (ops.Tensor, sparse_tensor.SparseTensor, variables.Variable)
return isinstance(x, tensor_types)
......@@ -303,7 +304,7 @@ def with_shape(expected_shape, tensor):
Raises:
ValueError: if tensor has an invalid shape.
"""
if isinstance(tensor, ops.SparseTensor):
if isinstance(tensor, sparse_tensor.SparseTensor):
raise ValueError('SparseTensor not supported.')
# Shape type must be 1D int32.
......@@ -376,9 +377,9 @@ def convert_to_tensor_or_sparse_tensor(
"""
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, ops.SparseTensorValue):
value = ops.SparseTensor.from_value(value)
if isinstance(value, ops.SparseTensor):
if isinstance(value, sparse_tensor.SparseTensorValue):
value = sparse_tensor.SparseTensor.from_value(value)
if isinstance(value, sparse_tensor.SparseTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise RuntimeError(
'Sparse dtype: requested = %s, actual = %s' % (
......
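
A small editorial sketch of how the updated `convert_to_tensor_or_sparse_tensor` behaves (an illustration, not part of the diff; the module path for the contrib `tensor_util` shown in the hunk above is assumed): a `SparseTensorValue` is promoted via `SparseTensor.from_value`, a `SparseTensor` is passed through after the dtype compatibility check, and anything else falls through to ordinary dense conversion.

```python
# Editorial sketch: expected conversions after this change. Assumes the
# contrib tensor_util module from the hunk above is importable at this path.
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor

sp_value = sparse_tensor.SparseTensorValue(
    indices=[[0, 0]], values=[42], shape=[2, 2])

# A SparseTensorValue is wrapped into a SparseTensor.
sp = tensor_util.convert_to_tensor_or_sparse_tensor(sp_value)
assert isinstance(sp, sparse_tensor.SparseTensor)

# Dense input still becomes an ordinary Tensor.
dense = tensor_util.convert_to_tensor_or_sparse_tensor(
    [1.0, 2.0], dtype=dtypes.float32)
assert isinstance(dense, ops.Tensor)
```
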
......@@ -22,6 +22,7 @@ from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
......@@ -43,7 +44,7 @@ def _get_tensor_repr(t,
if print_tensor_type:
if isinstance(t, ops.Tensor):
t_type_str = "Type: Tensor ({})".format(t.dtype.name)
elif isinstance(t, ops.SparseTensor):
elif isinstance(t, sparse_tensor.SparseTensor):
t_type_str = "Type: SparseTensor ({})".format(t.dtype.name)
elif isinstance(t, tensor_array_ops.TensorArray):
t_type_str = "Type: TensorArray ({})".format(t.dtype.name)
......@@ -51,7 +52,7 @@ def _get_tensor_repr(t,
tensor_list.append(constant_op.constant(t_type_str))
if print_shape:
if isinstance(t, ops.SparseTensor):
if isinstance(t, sparse_tensor.SparseTensor):
tensor_list.append(constant_op.constant("Shape:"))
tensor_list.append(t.shape)
elif isinstance(t, ops.Tensor):
......@@ -66,7 +67,7 @@ def _get_tensor_repr(t,
tensor_list.append(constant_op.constant("First True in Boolean tensor at:"))
tensor_list.append(math_ops.argmax(int_tensor, 0))
if isinstance(t, ops.SparseTensor):
if isinstance(t, sparse_tensor.SparseTensor):
tensor_list.append(constant_op.constant("Sparse indices:"))
tensor_list.append(t.indices)
tensor_list.append(constant_op.constant("Sparse values:"))
......@@ -137,13 +138,13 @@ def print_op(input_,
if isinstance(input_, ops.Tensor):
input_ = logging_ops.Print(input_, tensor_list, message, first_n, summarize,
name)
elif isinstance(input_, ops.SparseTensor):
elif isinstance(input_, sparse_tensor.SparseTensor):
p = logging_ops.Print(
constant_op.constant([]), tensor_list, message, first_n, summarize,
name)
with ops.control_dependencies([p]):
input_ = ops.SparseTensor(array_ops.identity(input_.indices),
input_ = sparse_tensor.SparseTensor(array_ops.identity(input_.indices),
array_ops.identity(input_.values),
array_ops.identity(input_.shape))
elif isinstance(input_, tensor_array_ops.TensorArray):
......
......@@ -22,6 +22,7 @@ from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
......@@ -114,7 +115,8 @@ def safe_embedding_lookup_sparse(embedding_weights,
array_ops.slice(original_shape, [0], [original_rank - 1])),
array_ops.gather(original_shape, original_rank - 1)])
if sparse_weights is not None:
sparse_weights = ops.SparseTensor(sparse_ids.indices,
sparse_weights = sparse_tensor.SparseTensor(
sparse_ids.indices,
sparse_weights.values, sparse_ids.shape)
# Prune invalid ids and weights.
......@@ -302,7 +304,7 @@ def hashed_embedding_lookup_sparse(params,
params = list(params)
if not isinstance(params, list):
params = [params]
if not isinstance(sparse_values, ops.SparseTensor):
if not isinstance(sparse_values, sparse_tensor.SparseTensor):
raise TypeError("sparse_values must be SparseTensor")
with ops.name_scope(name, "hashed_sparse_embedding_lookup",
......
......@@ -21,7 +21,7 @@ from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import embedding_ops as contrib_embedding_ops
from tensorflow.contrib.layers.python.ops import sparse_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
......@@ -74,14 +74,14 @@ def bow_encoder(ids,
initializer=initializer, regularizer=regularizer,
trainable=trainable)
if sparse_lookup:
if isinstance(ids, ops.SparseTensor):
if isinstance(ids, sparse_tensor.SparseTensor):
sparse_ids = ids
else:
sparse_ids = sparse_ops.dense_to_sparse_tensor(ids)
return contrib_embedding_ops.safe_embedding_lookup_sparse(
[embeddings], sparse_ids, combiner='mean', default_id=0)
else:
if isinstance(ids, ops.SparseTensor):
if isinstance(ids, sparse_tensor.SparseTensor):
raise TypeError('ids are expected to be dense Tensor, got: %s', ids)
return math_ops.reduce_mean(
embedding_ops.embedding_lookup(embeddings, ids),
......
......@@ -81,7 +81,7 @@ from tensorflow.contrib.layers.python.ops import bucketization_op
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.contrib.lookup import lookup_ops as contrib_lookup_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
......@@ -390,7 +390,7 @@ class _SparseColumnIntegerized(_SparseColumn):
sparse_id_values = math_ops.mod(columns_to_tensors[self.name].values,
self.bucket_size,
name="mod")
columns_to_tensors[self] = ops.SparseTensor(
columns_to_tensors[self] = sparse_tensor_py.SparseTensor(
columns_to_tensors[self.name].indices, sparse_id_values,
columns_to_tensors[self.name].shape)
......@@ -464,7 +464,7 @@ class _SparseColumnHashed(_SparseColumn):
sparse_id_values = string_ops.string_to_hash_bucket_fast(
sparse_values, self.bucket_size, name="lookup")
columns_to_tensors[self] = ops.SparseTensor(
columns_to_tensors[self] = sparse_tensor_py.SparseTensor(
sparse_tensor.indices, sparse_id_values, sparse_tensor.shape)
......@@ -1452,7 +1452,8 @@ class _BucketizedColumn(_FeatureColumn, collections.namedtuple(
indices = math_ops.to_int64(array_ops.transpose(array_ops.pack((i1, i2))))
shape = math_ops.to_int64(array_ops.pack([batch_size, dimension]))
sparse_id_values = ops.SparseTensor(indices, bucket_indices, shape)
sparse_id_values = sparse_tensor_py.SparseTensor(
indices, bucket_indices, shape)
return sparse_id_values
......
......@@ -26,6 +26,7 @@ from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
......@@ -362,7 +363,7 @@ def _create_joint_embedding_lookup(columns_to_tensors,
values = t.values + prev_size
prev_size += a.vocab_size
sparse_tensors.append(
ops.SparseTensor(t.indices,
sparse_tensor_py.SparseTensor(t.indices,
values,
t.shape))
sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
......@@ -695,7 +696,7 @@ def _log_variable(variable):
def _infer_real_valued_column_for_tensor(name, tensor):
"""Creates a real_valued_column for given tensor and name."""
if isinstance(tensor, ops.SparseTensor):
if isinstance(tensor, sparse_tensor_py.SparseTensor):
raise ValueError(
'SparseTensor is not supported for auto detection. Please define '
'corresponding FeatureColumn for tensor {} {}.', name, tensor)
......
......@@ -31,6 +31,7 @@ from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
......@@ -1217,7 +1218,7 @@ def _inner_flatten(inputs, new_rank, output_collections=None, scope=None):
TypeError: `inputs` is not a `Tensor` or `SparseTensor`.
"""
with ops.name_scope(scope, 'InnerFlatten', [inputs, new_rank]) as sc:
if isinstance(inputs, ops.SparseTensor):
if isinstance(inputs, sparse_tensor.SparseTensor):
flattened = _sparse_inner_flatten(inputs, new_rank)
else:
inputs = ops.convert_to_tensor(inputs)
......
......@@ -23,6 +23,7 @@ from tensorflow.contrib.util import loader
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
......@@ -69,12 +70,14 @@ def sparse_feature_cross(inputs, hashed_output=False, num_buckets=0,
"""
if not isinstance(inputs, list):
raise TypeError("Inputs must be a list")
if not all(isinstance(i, ops.SparseTensor) or
if not all(isinstance(i, sparse_tensor.SparseTensor) or
isinstance(i, ops.Tensor) for i in inputs):
raise TypeError("All inputs must be SparseTensors")
sparse_inputs = [i for i in inputs if isinstance(i, ops.SparseTensor)]
dense_inputs = [i for i in inputs if not isinstance(i, ops.SparseTensor)]
sparse_inputs = [i for i in inputs
if isinstance(i, sparse_tensor.SparseTensor)]
dense_inputs = [i for i in inputs
if not isinstance(i, sparse_tensor.SparseTensor)]
indices = [sp_input.indices for sp_input in sparse_inputs]
values = [sp_input.values for sp_input in sparse_inputs]
......@@ -117,7 +120,7 @@ def sparse_feature_cross(inputs, hashed_output=False, num_buckets=0,
internal_type=internal_type,
name=name))
return ops.SparseTensor(indices_out, values_out, shape_out)
return sparse_tensor.SparseTensor(indices_out, values_out, shape_out)
ops.RegisterShape("SparseFeatureCross")(common_shapes.call_cpp_shape_fn)
......
......@@ -20,6 +20,7 @@ from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
......@@ -78,4 +79,4 @@ def dense_to_sparse_tensor(dense_tensor, ignore_value=None):
math_ops.mul(higher_dims, shape_multipliers), reduction_indices=[1])
flat_indices = math_ops.add(flat_indices, offsets)
values = array_ops.gather(flat_tensor, flat_indices)
return ops.SparseTensor(indices, values, dense_shape)
return sparse_tensor.SparseTensor(indices, values, dense_shape)
......@@ -22,11 +22,12 @@ from __future__ import print_function
from tensorflow.contrib.layers import feature_column
from tensorflow.contrib.learn.python.learn.dataframe import series as ss
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import parsing_ops
def _to_feature_spec(tensor, default_value=None):
if isinstance(tensor, ops.SparseTensor):
if isinstance(tensor, sparse_tensor.SparseTensor):
return parsing_ops.VarLenFeature(dtype=tensor.dtype)
else:
return parsing_ops.FixedLenFeature(shape=tensor.get_shape(),
......
......@@ -20,7 +20,7 @@ from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import series
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
# Each entry is a mapping from registered_name to operation. Each operation is
......@@ -55,8 +55,8 @@ class SeriesBinaryTransform(transform.TensorFlowTransform):
def _apply_transform(self, input_tensors, **kwargs):
# TODO(jamieas): consider supporting sparse inputs.
if isinstance(input_tensors[0], ops.SparseTensor) or isinstance(
input_tensors[1], ops.SparseTensor):
if isinstance(input_tensors[0], sparse_tensor.SparseTensor) or isinstance(
input_tensors[1], sparse_tensor.SparseTensor):
raise TypeError("{} does not support SparseTensors".format(
type(self).__name__))
......@@ -89,8 +89,8 @@ class ScalarBinaryTransform(transform.TensorFlowTransform):
def _apply_transform(self, input_tensors, **kwargs):
input_tensor = input_tensors[0]
if isinstance(input_tensor, ops.SparseTensor):
result = ops.SparseTensor(input_tensor.indices,
if isinstance(input_tensor, sparse_tensor.SparseTensor):
result = sparse_tensor.SparseTensor(input_tensor.indices,
self._apply_op(input_tensor.values),
input_tensor.shape)
else:
......
......@@ -23,6 +23,7 @@ from tensorflow.contrib.learn.python.learn.dataframe import series
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
......@@ -93,7 +94,7 @@ class BooleanMask(transform.TensorFlowTransform):
if mask.get_shape().ndims > 1:
mask = array_ops.squeeze(mask)
if isinstance(input_tensor, ops.SparseTensor):
if isinstance(input_tensor, sparse_tensor_py.SparseTensor):
mask_fn = sparse_boolean_mask
else:
mask_fn = array_ops.boolean_mask
......
......@@ -21,14 +21,14 @@ from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import series
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
def _negate_sparse(sparse_tensor):
return ops.SparseTensor(indices=sparse_tensor.indices,
values=-sparse_tensor.values,
shape=sparse_tensor.shape)
def _negate_sparse(st):
return sparse_tensor.SparseTensor(indices=st.indices,
values=-st.values,
shape=st.shape)
@series.Series.register_binary_op("__sub__")
......@@ -51,8 +51,8 @@ class Difference(transform.TensorFlowTransform):
return "output",
def _apply_transform(self, input_tensors, **kwargs):
pair_sparsity = (isinstance(input_tensors[0], ops.SparseTensor),
isinstance(input_tensors[1], ops.SparseTensor))
pair_sparsity = (isinstance(input_tensors[0], sparse_tensor.SparseTensor),
isinstance(input_tensors[1], sparse_tensor.SparseTensor))
if pair_sparsity == (False, False):
result = input_tensors[0] - input_tensors[1]
......
......@@ -24,7 +24,7 @@ import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
......@@ -82,4 +82,5 @@ class Sparsify(transform.TensorFlowTransform):
shape = math_ops.cast(array_ops.shape(d), dtypes.int64)
# pylint: disable=not-callable
return self.return_type(ops.SparseTensor(sparse_indices, values, shape))
return self.return_type(
sparse_tensor.SparseTensor(sparse_indices, values, shape))
......@@ -21,7 +21,7 @@ from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import series
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
......@@ -45,8 +45,8 @@ class Sum(transform.TensorFlowTransform):
return "output",
def _apply_transform(self, input_tensors, **kwargs):
pair_sparsity = (isinstance(input_tensors[0], ops.SparseTensor),
isinstance(input_tensors[1], ops.SparseTensor))
pair_sparsity = (isinstance(input_tensors[0], sparse_tensor.SparseTensor),
isinstance(input_tensors[1], sparse_tensor.SparseTensor))
if pair_sparsity == (False, False):
result = input_tensors[0] + input_tensors[1]
......@@ -57,6 +57,3 @@ class Sum(transform.TensorFlowTransform):
# pylint: disable=not-callable
return self.return_type(result)
......@@ -21,7 +21,7 @@ from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import series
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
# Each entry is a mapping from registered_name to operation. Each operation is
......@@ -83,8 +83,8 @@ def register_unary_op(registered_name, operation, ignore_dtype=None):
def _apply_transform(self, input_tensors, **kwargs):
input_tensor = input_tensors[0]
if isinstance(input_tensor, ops.SparseTensor):
result = ops.SparseTensor(input_tensor.indices,
if isinstance(input_tensor, sparse_tensor.SparseTensor):
result = sparse_tensor.SparseTensor(input_tensor.indices,
operation(input_tensor.values),
input_tensor.shape)
else:
......
......@@ -27,6 +27,7 @@ from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.session_bundle import exporter
from tensorflow.python import summary
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
......@@ -603,7 +604,7 @@ class _MultiClassHead(_Head):
def _check_labels(labels, label_name):
labels = labels[label_name] if isinstance(labels, dict) else labels
if isinstance(labels, ops.SparseTensor):
if isinstance(labels, sparse_tensor.SparseTensor):
raise ValueError("SparseTensor is not supported as labels.")
return labels
......
......@@ -22,7 +22,7 @@ from __future__ import print_function
import collections
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
......@@ -41,7 +41,7 @@ class TensorSignature(collections.namedtuple(
"""
def __new__(cls, tensor):
if isinstance(tensor, ops.SparseTensor):
if isinstance(tensor, sparse_tensor.SparseTensor):
return super(TensorSignature, cls).__new__(
cls, dtype=tensor.values.dtype, shape=None, is_sparse=True)
return super(TensorSignature, cls).__new__(
......
......@@ -24,6 +24,7 @@ from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
......@@ -645,7 +646,7 @@ def queue_parsed_features(parsed_features,
# directly.
for key in sorted(parsed_features.keys()):
tensor = parsed_features[key]
if isinstance(tensor, ops.SparseTensor):
if isinstance(tensor, sparse_tensor.SparseTensor):
tensors_mapping.append((key, True))
tensors_to_enqueue.extend([tensor.indices, tensor.values, tensor.shape])
else:
......@@ -704,7 +705,7 @@ def queue_parsed_features(parsed_features,
for key, is_sparse_tensor in tensors_mapping:
if is_sparse_tensor:
# Three tensors are (indices, values, shape).
dequeued_parsed_features[key] = ops.SparseTensor(
dequeued_parsed_features[key] = sparse_tensor.SparseTensor(
dequeued_tensors[index], dequeued_tensors[index + 1],
dequeued_tensors[index + 2])
index += 3
......
......@@ -20,6 +20,7 @@ from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
......@@ -166,7 +167,7 @@ class InitializableLookupTableBase(LookupInterface):
name = "%s_lookup_table_find" % self._name
key_tensor = keys
if isinstance(keys, ops.SparseTensor):
if isinstance(keys, sparse_tensor.SparseTensor):
key_tensor = keys.values
if keys.dtype != self._key_dtype:
......@@ -181,8 +182,8 @@ class InitializableLookupTableBase(LookupInterface):
# pylint: enable=protected-access
values.set_shape(key_tensor.get_shape())
if isinstance(keys, ops.SparseTensor):
return ops.SparseTensor(keys.indices, values, keys.shape)
if isinstance(keys, sparse_tensor.SparseTensor):
return sparse_tensor.SparseTensor(keys.indices, values, keys.shape)
else:
return values
......
......@@ -21,6 +21,7 @@ from __future__ import print_function
from tensorflow.contrib.framework import tensor_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
......@@ -102,7 +103,7 @@ def confusion_matrix(predictions, labels, num_classes=None, dtype=dtypes.int32,
indices = array_ops.transpose(array_ops.pack([predictions, labels]))
values = (array_ops.ones_like(predictions, dtype)
if weights is None else weights)
cm_sparse = ops.SparseTensor(
cm_sparse = sparse_tensor.SparseTensor(
indices=indices, values=values, shape=math_ops.to_int64(shape))
zero_matrix = array_ops.zeros(math_ops.to_int32(shape), dtype)
......
......@@ -29,6 +29,7 @@ from tensorflow.contrib.metrics.python.ops import confusion_matrix_ops
from tensorflow.contrib.metrics.python.ops import set_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
......@@ -1601,7 +1602,8 @@ def num_relevant(labels, k):
raise ValueError('Invalid k=%s.' % k)
with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
# For SparseTensor, calculate separate count for each row.
if isinstance(labels, (ops.SparseTensor, ops.SparseTensorValue)):
if isinstance(
labels, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
labels_sizes = set_ops.set_size(labels)
return math_ops.minimum(labels_sizes, k, name=scope)
......@@ -1637,9 +1639,9 @@ def expand_and_tile(tensor, multiple, dim=0, name=None):
with ops.name_scope(
name, 'expand_and_tile', (tensor, multiple, dim)) as scope:
# Sparse.
if isinstance(tensor, ops.SparseTensorValue):
tensor = ops.SparseTensor.from_value(tensor)
if isinstance(tensor, ops.SparseTensor):
if isinstance(tensor, sparse_tensor.SparseTensorValue):
tensor = sparse_tensor.SparseTensor.from_value(tensor)
if isinstance(tensor, sparse_tensor.SparseTensor):
if dim < 0:
expand_dims = array_ops.reshape(
array_ops.size(tensor.shape) + dim, [1])
......@@ -1871,7 +1873,8 @@ def _select_class_id(ids, selected_id):
`SparseTensor` of same dimensions as `ids`. This contains only the entries
equal to `selected_id`.
"""
if isinstance(ids, (ops.SparseTensor, ops.SparseTensorValue)):
if isinstance(
ids, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return sparse_ops.sparse_retain(
ids, math_ops.equal(ids.values, selected_id))
......@@ -1888,7 +1891,7 @@ def _select_class_id(ids, selected_id):
filled_selected_id = array_ops.fill(
filled_selected_id_shape, math_ops.to_int64(selected_id))
result = set_ops.set_intersection(filled_selected_id, ids)
return ops.SparseTensor(
return sparse_tensor.SparseTensor(
indices=result.indices, values=result.values, shape=ids_shape)
......
......@@ -23,6 +23,7 @@ from tensorflow.contrib.util import loader
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.platform import resource_loader
......@@ -54,7 +55,7 @@ def set_size(a, validate_indices=True):
TypeError: If `a` is an invalid types.
"""
a = tensor_util.convert_to_tensor_or_sparse_tensor(a, name="a")
if not isinstance(a, ops.SparseTensor):
if not isinstance(a, sparse_tensor.SparseTensor):
raise TypeError("Expected `SparseTensor`, got %s." % a)
if a.values.dtype.base_dtype not in _VALID_DTYPES:
raise TypeError("Invalid dtype %s." % a.values.dtype)
......@@ -106,22 +107,22 @@ def _set_operation(a, b, set_operation, validate_indices=True):
if b.dtype.base_dtype != a.dtype.base_dtype:
raise TypeError("Types don't match, %s vs %s." % (a.dtype, b.dtype))
# pylint: disable=protected-access
if isinstance(a, ops.SparseTensor):
if isinstance(b, ops.SparseTensor):
if isinstance(a, sparse_tensor.SparseTensor):
if isinstance(b, sparse_tensor.SparseTensor):
indices, values, shape = _set_ops.sparse_to_sparse_set_operation(
a.indices, a.values, a.shape, b.indices, b.values, b.shape,
set_operation, validate_indices)
else:
raise ValueError("Sparse,Dense is not supported, but Dense,Sparse is. "
"Please flip the order of your inputs.")
elif isinstance(b, ops.SparseTensor):
elif isinstance(b, sparse_tensor.SparseTensor):
indices, values, shape = _set_ops.dense_to_sparse_set_operation(
a, b.indices, b.values, b.shape, set_operation, validate_indices)
else:
indices, values, shape = _set_ops.dense_to_dense_set_operation(
a, b, set_operation, validate_indices)
# pylint: enable=protected-access
return ops.SparseTensor(indices, values, shape)
return sparse_tensor.SparseTensor(indices, values, shape)
def set_intersection(a, b, validate_indices=True):
......
......@@ -27,7 +27,7 @@ import abc
from tensorflow.contrib.slim.python.slim.data import data_decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_ops
......@@ -189,11 +189,11 @@ class Tensor(ItemHandler):
shape_dims = []
for k in self._shape_keys:
shape_dim = keys_to_tensors[k]
if isinstance(shape_dim, ops.SparseTensor):
if isinstance(shape_dim, sparse_tensor.SparseTensor):
shape_dim = sparse_ops.sparse_tensor_to_dense(shape_dim)
shape_dims.append(shape_dim)
shape = array_ops.reshape(array_ops.pack(shape_dims), [-1])
if isinstance(tensor, ops.SparseTensor):
if isinstance(tensor, sparse_tensor.SparseTensor):
if shape is not None:
tensor = sparse_ops.sparse_reshape(tensor, shape)
tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
......@@ -241,7 +241,7 @@ class SparseTensor(ItemHandler):
values = keys_to_tensors[self._values_key]
if self._shape_key:
shape = keys_to_tensors[self._shape_key]
if isinstance(shape, ops.SparseTensor):
if isinstance(shape, sparse_tensor.SparseTensor):
shape = sparse_ops.sparse_tensor_to_dense(shape)
elif self._shape:
shape = self._shape
......@@ -255,7 +255,7 @@ class SparseTensor(ItemHandler):
new_indices = array_ops.concat(1, [indices_columns_to_preserve,
array_ops.reshape(ids, [-1, 1])])
tensor = ops.SparseTensor(new_indices, values.values, shape)
tensor = sparse_tensor.SparseTensor(new_indices, values.values, shape)
if self._densify:
tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
return tensor
......
......@@ -25,6 +25,7 @@ from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
......@@ -77,7 +78,7 @@ def _ParseSparse(data):
ValueError: If data contains non-string Tensors.
"""
for k in sorted(data.keys()):
if not isinstance(data[k], ops.SparseTensor):
if not isinstance(data[k], sparse_tensor.SparseTensor):
raise NotImplementedError(
'Features should be either all sparse or all dense. Use a '
'feature engineering function to convert some of them.')
......@@ -133,7 +134,7 @@ def ParseDataTensorOrDict(data):
# If there's at least one sparse tensor, everything has to be sparse.
is_sparse = False
for v in data.values():
if isinstance(v, ops.SparseTensor):
if isinstance(v, sparse_tensor.SparseTensor):
is_sparse = True
break
if is_sparse:
......@@ -161,11 +162,11 @@ def ParseLabelTensorOrDict(labels):
"""
if isinstance(labels, dict):
return math_ops.to_float(array_ops.concat(
1, [sparse_ops.sparse_tensor_to_dense(labels[
k], default_value=-1) if isinstance(labels, ops.SparseTensor) else
labels[k] for k in sorted(labels.keys())]))
1, [sparse_ops.sparse_tensor_to_dense(labels[k], default_value=-1)
if isinstance(labels, sparse_tensor.SparseTensor)
else labels[k] for k in sorted(labels.keys())]))
else:
if isinstance(labels, ops.SparseTensor):
if isinstance(labels, sparse_tensor.SparseTensor):
return math_ops.to_float(sparse_ops.sparse_tensor_to_dense(
labels, default_value=-1))
else:
......
......@@ -29,6 +29,7 @@ from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
......@@ -629,7 +630,7 @@ class RandomTreeGraphs(object):
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
if isinstance(input_data, sparse_tensor.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
......@@ -882,7 +883,7 @@ class RandomTreeGraphs(object):
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
if isinstance(input_data, sparse_tensor.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
......
......@@ -281,6 +281,7 @@ py_library(
"framework/load_library.py",
"framework/meta_graph.py",
"framework/random_seed.py",
"framework/sparse_tensor.py",
"framework/tensor_util.py",
],
srcs_version = "PY2AND3",
......@@ -501,6 +502,19 @@ py_test(
],
)
py_test(
name = "framework_sparse_tensor_test",
size = "small",
srcs = ["framework/sparse_tensor_test.py"],
main = "framework/sparse_tensor_test.py",
srcs_version = "PY2AND3",
deps = [
":framework_test_lib",
":platform_test",
"//tensorflow/core:protos_all_py",
],
)
py_test(
name = "framework_device_test",
size = "small",
......
......@@ -28,6 +28,7 @@ from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as tf_session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
......@@ -97,10 +98,10 @@ def _get_feeds_for_indexed_slices(feed, feed_val):
_REGISTERED_EXPANSIONS = [
# SparseTensors are fetched as SparseTensorValues. They can be fed
# SparseTensorValues or normal tuples.
(ops.SparseTensor,
(sparse_tensor.SparseTensor,
lambda fetch: (
[fetch.indices, fetch.values, fetch.shape],
lambda fetched_vals: ops.SparseTensorValue(*fetched_vals)),
lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)),
lambda feed, feed_val: list(zip(
[feed.indices, feed.values, feed.shape], feed_val)),
lambda feed: [feed.indices, feed.values, feed.shape]),
......
......@@ -33,6 +33,7 @@ from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
......@@ -464,7 +465,7 @@ class SessionTest(test_util.TensorFlowTestCase):
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
sp = sparse_tensor.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
......@@ -533,14 +534,14 @@ class SessionTest(test_util.TensorFlowTestCase):
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
sp = sparse_tensor.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
......@@ -555,17 +556,19 @@ class SessionTest(test_util.TensorFlowTestCase):
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: ops.SparseTensorValue(indices, values, shape)})
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(sp, {sp: ops.SparseTensorValue(indices, values, shape)})
sp_out = s.run(
sp, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
......@@ -579,7 +582,7 @@ class SessionTest(test_util.TensorFlowTestCase):
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
......@@ -589,12 +592,13 @@ class SessionTest(test_util.TensorFlowTestCase):
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: ops.SparseTensorValue(indices, values, shape)})
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.shape, shape)
......@@ -609,7 +613,7 @@ class SessionTest(test_util.TensorFlowTestCase):
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
......@@ -619,12 +623,13 @@ class SessionTest(test_util.TensorFlowTestCase):
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: ops.SparseTensorValue(indices, values, shape)})
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.shape, shape)
......
......@@ -74,10 +74,11 @@ from tensorflow.python.framework.ops import Graph
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.framework.ops import Output
from tensorflow.python.framework.ops import SparseTensor
from tensorflow.python.framework.ops import SparseTensorValue
from tensorflow.python.framework.ops import IndexedSlices
from tensorflow.python.framework.sparse_tensor import SparseTensor
from tensorflow.python.framework.sparse_tensor import SparseTensorValue
# Utilities used when building a Graph.
from tensorflow.python.framework.ops import device
from tensorflow.python.framework.ops import container
......
......@@ -186,7 +186,13 @@ def register_dense_tensor_like_type(tensor_type):
_TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])
class Tensor(object):
# NOTE(ebrevdo): Do not subclass this. If you do, I will break you on purpose.
class _TensorLike(object):
"""Internal cls for grouping Tensor, SparseTensor, ..., for is_instance."""
pass
class Tensor(_TensorLike):
"""Represents one of the outputs of an `Operation`.
*Note:* the `Tensor` class will be replaced by `Output` in the future.
......@@ -752,7 +758,7 @@ def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None,
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, (IndexedSlices, SparseTensor)):
if isinstance(value, _TensorLike):
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r"
......@@ -858,7 +864,7 @@ def register_tensor_conversion_function(base_type, conversion_func,
funcs_at_priority.append((base_type, conversion_func))
class IndexedSlices(object):
class IndexedSlices(_TensorLike):
"""A sparse representation of a set of tensor slices at given indices.
This class is a simple wrapper for a pair of `Tensor` objects:
......@@ -958,196 +964,6 @@ IndexedSlicesValue = collections.namedtuple(
"IndexedSlicesValue", ["values", "indices", "dense_shape"])
class SparseTensor(object):
"""Represents a sparse tensor.
TensorFlow represents a sparse tensor as three separate dense tensors:
`indices`, `values`, and `shape`. In Python, the three tensors are
collected into a `SparseTensor` class for ease of use. If you have separate
`indices`, `values`, and `shape` tensors, wrap them in a `SparseTensor`
object before passing to the ops below.
Concretely, the sparse tensor `SparseTensor(indices, values, shape)`
comprises the following components, where `N` and `ndims` are the number
of values and number of dimensions in the `SparseTensor`, respectively:
* `indices`: A 2-D int64 tensor of shape `[N, ndims]`, which specifies
the indices of the elements in the sparse tensor that contain nonzero
values (elements are zero-indexed). For example, `indices=[[1,3], [2,4]]`
specifies that the elements with indexes of [1,3] and [2,4] have
nonzero values.
* `values`: A 1-D tensor of any type and shape `[N]`, which supplies the
values for each element in `indices`. For example, given
`indices=[[1,3], [2,4]]`, the parameter `values=[18, 3.6]` specifies
that element [1,3] of the sparse tensor has a value of 18, and element
[2,4] of the tensor has a value of 3.6.
* `shape`: A 1-D int64 tensor of shape `[ndims]`, which specifies the shape
of the sparse tensor. Takes a list indicating the number of elements in
each dimension. For example, `shape=[3,6]` specifies a two-dimensional 3x6
tensor, `shape=[2,3,4]` specifies a three-dimensional 2x3x4 tensor, and
`shape=[9]` specifies a one-dimensional tensor with 9 elements.
The corresponding dense tensor satisfies:
```python
dense.shape = shape
dense[tuple(indices[i])] = values[i]
```
By convention, `indices` should be sorted in row-major order (or equivalently
lexicographic order on the tuples `indices[i]`). This is not enforced when
`SparseTensor` objects are constructed, but most ops assume correct ordering.
If the ordering of sparse tensor `st` is wrong, a fixed version can be
obtained by calling `tf.sparse_reorder(st)`.
Example: The sparse tensor
```python
SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], shape=[3, 4])
```
represents the dense tensor
```python
[[1, 0, 0, 0]
[0, 0, 2, 0]
[0, 0, 0, 0]]
```
@@__init__
@@indices
@@values
@@shape
@@dtype
@@op
@@graph
"""
@classmethod
def from_value(cls, sparse_tensor_value):
if not (
isinstance(sparse_tensor_value, SparseTensor) or
isinstance(sparse_tensor_value, SparseTensorValue)):
raise TypeError(
"Neither a SparseTensor nor SparseTensorValue: %s." %
sparse_tensor_value)
return SparseTensor(
indices=sparse_tensor_value.indices,
values=sparse_tensor_value.values,
shape=sparse_tensor_value.shape)
def __init__(self, indices, values, shape):
"""Creates a `SparseTensor`.
Args:
indices: A 2-D int64 tensor of shape `[N, ndims]`.
values: A 1-D tensor of any type and shape `[N]`.
shape: A 1-D int64 tensor of shape `[ndims]`.
Returns:
A `SparseTensor`
"""
with name_scope(None, "SparseTensor", [indices, values, shape]):
indices = convert_to_tensor(indices, name="indices", dtype=dtypes.int64)
# Always pass as_ref=True because we want to be able to update
# values later if it is a VariableOp.
# TODO(touts): Consider adding mutable_values() when 'values'
# is a VariableOp and updating users of SparseTensor.
values = convert_to_tensor(values, name="values", as_ref=True)
shape = convert_to_tensor(shape, name="shape", dtype=dtypes.int64)
self._indices = indices
self._values = values
self._shape = shape
indices_shape = indices.get_shape().with_rank(2)
values_shape = values.get_shape().with_rank(1)
shape_shape = shape.get_shape().with_rank(1)
# Assert number of rows in indices match the number of elements in values.
indices_shape[0].merge_with(values_shape[0])
# Assert number of columns in indices matches the number of elements in
# shape.
indices_shape[1].merge_with(shape_shape[0])
@property
def indices(self):
"""The indices of non-zero values in the represented dense tensor.
Returns:
A 2-D Tensor of int64 with shape `[N, ndims]`, where `N` is the
number of non-zero values in the tensor, and `ndims` is the rank.
"""
return self._indices
@property
def values(self):
"""The non-zero values in the represented dense tensor.
Returns:
A 1-D Tensor of any data type.
"""
return self._values
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self.values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._values.dtype
@property
def shape(self):
"""A 1-D Tensor of int64 representing the shape of the dense tensor."""
return self._shape
@property
def graph(self):
"""The `Graph` that contains the index, value, and shape tensors."""
return self._indices.graph
def __str__(self):
return "SparseTensor(indices=%s, values=%s, shape=%s)" % (
self._indices, self._values, self._shape)
def eval(self, feed_dict=None, session=None):
"""Evaluates this sparse tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `SparseTensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See [`Session.run()`](../../api_docs/python/client.md#Session.run) for a
description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this sparse
tensor. If none, the default session will be used.
Returns:
A `SparseTensorValue` object.
"""
indices, values, shape = _eval_using_default_session(
[self.indices, self.values, self.shape], feed_dict, self.graph, session)
return SparseTensorValue(indices, values, shape)
@staticmethod
def _override_operator(operator, func):
_override_helper(SparseTensor, operator, func)
SparseTensorValue = collections.namedtuple("SparseTensorValue",
["indices", "values", "shape"])
def _device_string(dev_spec):
if isinstance(dev_spec, pydev.DeviceSpec):
return dev_spec.to_string()
......@@ -3976,7 +3792,7 @@ def _get_graph_from_inputs(op_input_list, graph=None):
for op_input in op_input_list:
# Determine if this is a valid graph_element.
graph_element = None
if isinstance(op_input, (Operation, Tensor, SparseTensor, IndexedSlices)):
if isinstance(op_input, (Operation, _TensorLike)):
graph_element = op_input
else:
graph_element = _as_graph_element(op_input)
......
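
For context (an editorial sketch, not part of the diff): the new private `_TensorLike` base class groups `Tensor`, `IndexedSlices`, and the relocated `SparseTensor`, which is what lets `convert_to_tensor_or_indexed_slices` and `_get_graph_from_inputs` above collapse their type lists into a single isinstance check.

```python
# Editorial sketch: the isinstance relationship introduced by _TensorLike.
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor

t = ops.convert_to_tensor(3.0)
st = sparse_tensor.SparseTensor(indices=[[0, 0]], values=[1.0], shape=[1, 1])

# Tensor and SparseTensor (and IndexedSlices) now share the _TensorLike base,
# so ops.py can test "is this tensor-like?" with one check.
# pylint: disable=protected-access
assert isinstance(t, ops._TensorLike)
assert isinstance(st, ops._TensorLike)
```
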
......@@ -25,6 +25,7 @@ from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
......@@ -86,38 +87,6 @@ class TensorTest(test_util.TensorFlowTestCase):
pass
class SparseTensorTest(test_util.TensorFlowTestCase):
def testInvalidFromValue(self):
for invalid_value in (None, 42.0, ops.convert_to_tensor(42.0)):
with self.assertRaisesRegexp(
TypeError, "Neither a SparseTensor nor SparseTensorValue"):
ops.SparseTensor.from_value(invalid_value)
def testPythonConstruction(self):
indices = [[1, 2], [2, 0], [3, 4]]
values = [b"a", b"b", b"c"]
shape = [4, 5]
sp_value = ops.SparseTensorValue(indices, values, shape)
for sp in [
ops.SparseTensor(indices, values, shape),
ops.SparseTensor.from_value(sp_value),
ops.SparseTensor.from_value(ops.SparseTensor(indices, values, shape))]:
self.assertEqual(sp.indices.dtype, dtypes.int64)
self.assertEqual(sp.values.dtype, dtypes.string)
self.assertEqual(sp.shape.dtype, dtypes.int64)
with self.test_session() as sess:
value = sp.eval()
self.assertAllEqual(indices, value.indices)
self.assertAllEqual(values, value.values)
self.assertAllEqual(shape, value.shape)
sess_run_value = sess.run(sp)
self.assertAllEqual(sess_run_value.indices, value.indices)
self.assertAllEqual(sess_run_value.values, value.values)
self.assertAllEqual(sess_run_value.shape, value.shape)
class IndexedSlicesTest(test_util.TensorFlowTestCase):
def testToTensor(self):
......@@ -1252,7 +1221,7 @@ class OpScopeTest(test_util.TensorFlowTestCase):
g0 = ops.Graph()
a = g0.create_op("a", [], [dtypes.float32])
b = g0.create_op("b", [], [dtypes.float32])
sparse = ops.SparseTensor(
sparse = sparse_tensor.SparseTensor(
_apply_op(g0, "const", [], [dtypes.int64]),
_apply_op(g0, "const", [], [dtypes.float32]),
_apply_op(g0, "const", [], [dtypes.int64]))
......
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
# pylint: disable=protected-access
_TensorLike = ops._TensorLike
_eval_using_default_session = ops._eval_using_default_session
_override_helper = ops._override_helper
# pylint: enable=protected-access
class SparseTensor(_TensorLike):
"""Represents a sparse tensor.
TensorFlow represents a sparse tensor as three separate dense tensors:
`indices`, `values`, and `shape`. In Python, the three tensors are
collected into a `SparseTensor` class for ease of use. If you have separate
`indices`, `values`, and `shape` tensors, wrap them in a `SparseTensor`
object before passing to the ops below.
Concretely, the sparse tensor `SparseTensor(indices, values, shape)`
comprises the following components, where `N` and `ndims` are the number
of values and number of dimensions in the `SparseTensor`, respectively:
* `indices`: A 2-D int64 tensor of shape `[N, ndims]`, which specifies
the indices of the elements in the sparse tensor that contain nonzero
values (elements are zero-indexed). For example, `indices=[[1,3], [2,4]]`
specifies that the elements with indexes of [1,3] and [2,4] have
nonzero values.
* `values`: A 1-D tensor of any type and shape `[N]`, which supplies the
values for each element in `indices`. For example, given
`indices=[[1,3], [2,4]]`, the parameter `values=[18, 3.6]` specifies
that element [1,3] of the sparse tensor has a value of 18, and element
[2,4] of the tensor has a value of 3.6.
* `shape`: A 1-D int64 tensor of shape `[ndims]`, which specifies the shape
of the sparse tensor. Takes a list indicating the number of elements in
each dimension. For example, `shape=[3,6]` specifies a two-dimensional 3x6
tensor, `shape=[2,3,4]` specifies a three-dimensional 2x3x4 tensor, and
`shape=[9]` specifies a one-dimensional tensor with 9 elements.
The corresponding dense tensor satisfies:
```python
dense.shape = shape
dense[tuple(indices[i])] = values[i]
```
By convention, `indices` should be sorted in row-major order (or equivalently
lexicographic order on the tuples `indices[i]`). This is not enforced when
`SparseTensor` objects are constructed, but most ops assume correct ordering.
If the ordering of sparse tensor `st` is wrong, a fixed version can be
obtained by calling `tf.sparse_reorder(st)`.
Example: The sparse tensor
```python
SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], shape=[3, 4])
```
represents the dense tensor
```python
[[1, 0, 0, 0]
[0, 0, 2, 0]
[0, 0, 0, 0]]
```
@@__init__
@@get_shape
@@indices
@@values
@@shape
@@dtype
@@op
@@graph
"""
@classmethod
def from_value(cls, sparse_tensor_value):
if not (isinstance(sparse_tensor_value, SparseTensor) or
isinstance(sparse_tensor_value, SparseTensorValue)):
raise TypeError(
"Neither a SparseTensor nor SparseTensorValue: %s."
% sparse_tensor_value)
return SparseTensor(
indices=sparse_tensor_value.indices,
values=sparse_tensor_value.values,
shape=sparse_tensor_value.shape)
def __init__(self, indices, values, shape):
"""Creates a `SparseTensor`.
Args:
indices: A 2-D int64 tensor of shape `[N, ndims]`.
values: A 1-D tensor of any type and shape `[N]`.
shape: A 1-D int64 tensor of shape `[ndims]`.
Returns:
A `SparseTensor`
"""
with ops.name_scope(None, "SparseTensor", [indices, values, shape]):
indices = ops.convert_to_tensor(
indices, name="indices", dtype=dtypes.int64)
# Always pass as_ref=True because we want to be able to update
# values later if it is a VariableOp.
# TODO(touts): Consider adding mutable_values() when 'values'
# is a VariableOp and updating users of SparseTensor.
values = ops.convert_to_tensor(values, name="values", as_ref=True)
shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int64)
self._indices = indices
self._values = values
self._shape = shape
indices_shape = indices.get_shape().with_rank(2)
values_shape = values.get_shape().with_rank(1)
shape_shape = shape.get_shape().with_rank(1)
# Assert number of rows in indices match the number of elements in values.
indices_shape[0].merge_with(values_shape[0])
# Assert number of columns in indices matches the number of elements in
# shape.
indices_shape[1].merge_with(shape_shape[0])
def get_shape(self):
"""Get the `TensorShape` that represents the shape of the dense tensor.
Returns:
A `TensorShape` object.
"""
return tensor_util.constant_value_as_shape(self._shape)
@property
def indices(self):
"""The indices of non-zero values in the represented dense tensor.
Returns:
A 2-D Tensor of int64 with shape `[N, ndims]`, where `N` is the
number of non-zero values in the tensor, and `ndims` is the rank.
"""
return self._indices
@property
def values(self):
"""The non-zero values in the represented dense tensor.
Returns:
A 1-D Tensor of any data type.
"""
return self._values
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self.values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._values.dtype
@property
def shape(self):
"""A 1-D Tensor of int64 representing the shape of the dense tensor."""
return self._shape
@property
def graph(self):
"""The `Graph` that contains the index, value, and shape tensors."""
return self._indices.graph
def __str__(self):
return "SparseTensor(indices=%s, values=%s, shape=%s)" % (
self._indices, self._values, self._shape)
def eval(self, feed_dict=None, session=None):
"""Evaluates this sparse tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `SparseTensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See [`Session.run()`](../../api_docs/python/client.md#Session.run) for a
description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this sparse
tensor. If none, the default session will be used.
Returns:
A `SparseTensorValue` object.
"""
indices, values, shape = _eval_using_default_session(
[self.indices, self.values, self.shape], feed_dict, self.graph, session)
return SparseTensorValue(indices, values, shape)
@staticmethod
def _override_operator(operator, func):
_override_helper(SparseTensor, operator, func)
SparseTensorValue = collections.namedtuple("SparseTensorValue",
["indices", "values", "shape"])
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class SparseTensorTest(test_util.TensorFlowTestCase):
def testPythonConstruction(self):
indices = [[1, 2], [2, 0], [3, 4]]
values = [b"a", b"b", b"c"]
shape = [4, 5]
sp_value = sparse_tensor.SparseTensorValue(indices, values, shape)
for sp in [
sparse_tensor.SparseTensor(indices, values, shape),
sparse_tensor.SparseTensor.from_value(sp_value),
sparse_tensor.SparseTensor.from_value(
sparse_tensor.SparseTensor(indices, values, shape))]:
self.assertEqual(sp.indices.dtype, dtypes.int64)
self.assertEqual(sp.values.dtype, dtypes.string)
self.assertEqual(sp.shape.dtype, dtypes.int64)
self.assertEqual(sp.get_shape(), (4, 5))
with self.test_session() as sess:
value = sp.eval()
self.assertAllEqual(indices, value.indices)
self.assertAllEqual(values, value.values)
self.assertAllEqual(shape, value.shape)
sess_run_value = sess.run(sp)
self.assertAllEqual(sess_run_value.indices, value.indices)
self.assertAllEqual(sess_run_value.values, value.values)
self.assertAllEqual(sess_run_value.shape, value.shape)
if __name__ == "__main__":
googletest.main()
......@@ -22,7 +22,6 @@ import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
......@@ -36,7 +35,7 @@ def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x_values = x[non_zero]
x_shape = x.shape
return ops.SparseTensor(
return tf.SparseTensor(
indices=x_indices, values=x_values, shape=x_shape), len(x_values)
class ShapeOpsTest(tf.test.TestCase):
......
......@@ -24,7 +24,6 @@ import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
......@@ -41,7 +40,7 @@ def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x_values = x[non_zero]
x_shape = x.shape
return ops.SparseTensor(
return tf.SparseTensor(
indices=x_indices, values=x_values, shape=x_shape), len(x_values)
......@@ -54,7 +53,7 @@ class SparseToIndicatorTest(test_util.TensorFlowTestCase):
[3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return ops.SparseTensor(
return tf.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, dtypes.int64))
......@@ -72,7 +71,7 @@ class SparseToIndicatorTest(test_util.TensorFlowTestCase):
[1, 2, 2]])
val = np.array([1, 10, 12, 103, 150, 149, 150, 122])
shape = np.array([2, 3, 4])
return ops.SparseTensor(
return tf.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, dtypes.int64))
......@@ -130,11 +129,11 @@ class SparseMergeTest(test_util.TensorFlowTestCase):
indices = np.array([0, 13, 10, 33, 32, 14])
values = np.array([-3, 4, 1, 9, 5, 1])
shape = np.array([3, 3])
indices = ops.SparseTensorValue(
indices = tf.SparseTensorValue(
np.array(ind, np.int64),
np.array(indices, indices_dtype),
np.array(shape, np.int64))
values = ops.SparseTensorValue(
values = tf.SparseTensorValue(
np.array(ind, np.int64),
np.array(values, values_dtype),
np.array(shape, np.int64))
......@@ -143,8 +142,8 @@ class SparseMergeTest(test_util.TensorFlowTestCase):
def _SparseTensor_3x50(self, indices_dtype, values_dtype):
indices, values = self._SparseTensorValue_3x50(indices_dtype, values_dtype)
return (
ops.SparseTensor.from_value(indices),
ops.SparseTensor.from_value(values))
tf.SparseTensor.from_value(indices),
tf.SparseTensor.from_value(values))
def _AssertResultsSorted(self, output, vocab_size):
self.assertAllEqual(
......@@ -172,8 +171,8 @@ class SparseMergeTest(test_util.TensorFlowTestCase):
vocab_size = 50
indices_v, values_v = self._SparseTensorValue_3x50(np.int32, np.float32)
with self.test_session(use_gpu=False) as sess:
for indices in (indices_v, ops.SparseTensor.from_value(indices_v)):
for values in (values_v, ops.SparseTensor.from_value(values_v)):
for indices in (indices_v, tf.SparseTensor.from_value(indices_v)):
for values in (values_v, tf.SparseTensor.from_value(values_v)):
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
......@@ -237,13 +236,13 @@ class SparseRetainTest(test_util.TensorFlowTestCase):
[3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return ops.SparseTensorValue(
return tf.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.int32),
np.array(shape, np.int64))
def _SparseTensor_5x6(self):
return ops.SparseTensor.from_value(self._SparseTensorValue_5x6())
return tf.SparseTensor.from_value(self._SparseTensorValue_5x6())
def testBasic(self):
with self.test_session(use_gpu=False) as sess:
......@@ -285,13 +284,13 @@ class SparseResetShapeTest(test_util.TensorFlowTestCase):
_SHP_2_5_6 = np.array([2, 5, 6], dtype=np.int64)
def _SparseTensor_2x5x6(self):
return ops.SparseTensor(
return tf.SparseTensor(
constant_op.constant(self._IND_2_5_6, dtypes.int64),
constant_op.constant(self._VAL_2_5_6, dtypes.int32),
constant_op.constant(self._SHP_2_5_6, dtypes.int64))
def _SparseTensorValue_2x5x6(self):
return ops.SparseTensorValue(self._IND_2_5_6, self._VAL_2_5_6,
return tf.SparseTensorValue(self._IND_2_5_6, self._VAL_2_5_6,
self._SHP_2_5_6)
def testBasic(self):
......@@ -395,13 +394,13 @@ class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
[3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return ops.SparseTensorValue(
return tf.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.int32),
np.array(shape, np.int64))
def _SparseTensor_5x6(self):
return ops.SparseTensor.from_value(self._SparseTensorValue_5x6())
return tf.SparseTensor.from_value(self._SparseTensorValue_5x6())
def _SparseTensor_String5x6(self):
ind = np.array([
......@@ -410,7 +409,7 @@ class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
[3, 2], [3, 3]])
val = np.array(["a", "b", "c", "d", "e", "f"])
shape = np.array([5, 6])
return ops.SparseTensor(
return tf.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.string),
constant_op.constant(shape, dtypes.int64))
......@@ -419,7 +418,7 @@ class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4]])
val = np.array([0, 10, 13, 14])
shape = np.array([2, 6])
return ops.SparseTensor(
return tf.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
......@@ -518,7 +517,7 @@ class SparseReduceSumTest(test_util.TensorFlowTestCase):
self._compare(sp_t, reduction_axes, ndims, True)
def testSimpleAndRandomInputs(self):
sp_t = ops.SparseTensor(self.ind, self.vals, self.shape)
sp_t = tf.SparseTensor(self.ind, self.vals, self.shape)
with self.test_session(use_gpu=False):
self._compare_all(sp_t, None, ndims=2)
......@@ -542,7 +541,7 @@ class SparseReduceSumTest(test_util.TensorFlowTestCase):
self._compare_all(sp_t, axes, ndims=len(dims))
def testInvalidAxes(self):
sp_t = ops.SparseTensor(self.ind, self.vals, self.shape)
sp_t = tf.SparseTensor(self.ind, self.vals, self.shape)
with self.test_session(use_gpu=False):
with self.assertRaisesOpError("Invalid reduction dimension -3"):
sparse_ops.sparse_reduce_sum(sp_t, -3).eval()
......@@ -574,8 +573,8 @@ class SparseReduceSumTest(test_util.TensorFlowTestCase):
class SparseMathOpsTest(test_util.TensorFlowTestCase):
def _check(self, result_tensor, result_np, input_sp_t):
self.assertTrue(isinstance(result_tensor, ops.SparseTensor))
self.assertTrue(isinstance(input_sp_t, ops.SparseTensor))
self.assertTrue(isinstance(result_tensor, tf.SparseTensor))
self.assertTrue(isinstance(input_sp_t, tf.SparseTensor))
self.assertAllEqual(input_sp_t.indices.eval(), result_tensor.indices.eval())
self.assertAllEqual(input_sp_t.shape.eval(), result_tensor.shape.eval())
......@@ -725,17 +724,17 @@ class SparseMinimumMaximumTest(test_util.TensorFlowTestCase):
def testBasic(self):
with self.test_session(use_gpu=False):
# 1-D, values at index 0.
sp_zero = ops.SparseTensor([[0]], [0], [7])
sp_one = ops.SparseTensor([[0]], [1], [7])
sp_zero = tf.SparseTensor([[0]], [0], [7])
sp_one = tf.SparseTensor([[0]], [1], [7])
max_tf = tf.sparse_maximum(sp_zero, sp_one).eval()
min_tf = tf.sparse_minimum(sp_zero, sp_one).eval()
self._assertSparseTensorValueEqual(sp_one.eval(), max_tf)
self._assertSparseTensorValueEqual(sp_zero.eval(), min_tf)
# Values at different indices.
sp_zero = ops.SparseTensor([[0]], [0], [7])
sp_zero_2 = ops.SparseTensor([[1]], [0], [7])
expected = ops.SparseTensor([[0], [1]], [0, 0], [7])
sp_zero = tf.SparseTensor([[0]], [0], [7])
sp_zero_2 = tf.SparseTensor([[1]], [0], [7])
expected = tf.SparseTensor([[0], [1]], [0, 0], [7])
max_tf = tf.sparse_maximum(sp_zero, sp_zero_2).eval()
min_tf = tf.sparse_minimum(sp_zero, sp_zero_2).eval()
self._assertSparseTensorValueEqual(expected.eval(), max_tf)
......@@ -767,13 +766,13 @@ class SparseMinimumMaximumTest(test_util.TensorFlowTestCase):
def testMismatchedShapes(self):
with self.test_session(use_gpu=False):
sp_zero = ops.SparseTensor([[0, 0]], [0], [1, 1])
sp_one = ops.SparseTensor([[0]], [1], [2])
sp_zero = tf.SparseTensor([[0, 0]], [0], [1, 1])
sp_one = tf.SparseTensor([[0]], [1], [2])
with self.assertRaisesOpError("Operands do not have the same ranks"):
tf.sparse_maximum(sp_zero, sp_one).eval()
sp_zero = ops.SparseTensor([[0]], [0], [1])
sp_one = ops.SparseTensor([[0]], [1], [2])
sp_zero = tf.SparseTensor([[0]], [0], [1])
sp_one = tf.SparseTensor([[0]], [1], [2])
with self.assertRaisesOpError("Operands' shapes do not match"):
tf.sparse_maximum(sp_zero, sp_one).eval()
......
......@@ -23,6 +23,7 @@ from math import ceil
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
......@@ -552,7 +553,7 @@ def _ExtractImagePatchesGrad(op, grad):
sp_shape = (rows_in * cols_in,
rows_out * cols_out * ksize_r * ksize_c)
sp_mat = ops.SparseTensor(
sp_mat = sparse_tensor.SparseTensor(
array_ops.constant(idx, dtype=ops.dtypes.int64),
array_ops.ones((len(idx),), dtype=ops.dtypes.float32),
sp_shape
......
......@@ -93,6 +93,7 @@ from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
......@@ -176,7 +177,8 @@ def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
"""
with ops.name_scope(name, "Shape", [input]) as name:
if isinstance(input, (ops.SparseTensor, ops.SparseTensorValue)):
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_math_ops.cast(input.shape, out_type)
else:
input_tensor = ops.convert_to_tensor(input)
......@@ -227,7 +229,8 @@ def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Size", [input]) as name:
if isinstance(input, (ops.SparseTensor, ops.SparseTensorValue)):
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_math_ops._prod(
gen_math_ops.cast(input.shape, out_type), 0, name=name)
else:
......@@ -279,7 +282,8 @@ def rank_internal(input, name=None, optimize=True):
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Rank", [input]) as name:
if isinstance(input, (ops.SparseTensor, ops.SparseTensorValue)):
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_array_ops.size(input.shape, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
......@@ -1499,7 +1503,7 @@ def sparse_placeholder(dtype, shape=None, name=None):
shape = _normalize_sparse_shape(shape, shape_name)
if shape is None:
shape = placeholder(dtypes.int64, shape=[None], name=shape_name)
return ops.SparseTensor(
return sparse_tensor.SparseTensor(
values=placeholder(
dtype, shape=[None],
name=(name + "/values") if name is not None else None),
......@@ -2004,9 +2008,13 @@ def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(hypothesis, (ops.SparseTensor, ops.SparseTensorValue)):
if not isinstance(
hypothesis, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
raise TypeError("Hypothesis must be a SparseTensor.")
if not isinstance(truth, (ops.SparseTensor, ops.SparseTensorValue)):
if not isinstance(
truth, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
raise TypeError("Truth must be a SparseTensor.")
return gen_array_ops._edit_distance(hypothesis.indices,
......
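Because `shape`, `size`, `rank` and `sparse_placeholder` now all go through `sparse_tensor.SparseTensor`/`SparseTensorValue`, they compose as below. This is only a sketch against the public wrappers (`tf.sparse_placeholder`, `tf.shape`, `tf.rank`, `tf.SparseTensorValue`) assumed to be exported at this revision.
```python
import numpy as np
import tensorflow as tf

sp = tf.sparse_placeholder(tf.float32)  # dense shape unknown until fed
shape_op = tf.shape(sp)                 # reads sp.shape, the 1-D int64 dense-shape tensor
rank_op = tf.rank(sp)

feed = tf.SparseTensorValue(indices=np.array([[0, 0], [1, 2]], np.int64),
                            values=np.array([1.0, 2.0], np.float32),
                            shape=np.array([2, 3], np.int64))

with tf.Session() as sess:
  print(sess.run([shape_op, rank_op], feed_dict={sp: feed}))  # [array([2, 3], dtype=int32), 2]
```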
......@@ -42,6 +42,7 @@ import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
......@@ -88,7 +89,7 @@ def assert_proper_iterable(values):
`Tensor`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`.
"""
unintentional_iterables = (
(ops.Tensor, ops.SparseTensor, np.ndarray)
(ops.Tensor, sparse_tensor.SparseTensor, np.ndarray)
+ compat.bytes_or_text_types
)
if isinstance(values, unintentional_iterables):
......
......@@ -21,6 +21,7 @@ from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
......@@ -149,7 +150,7 @@ def _ExitGrad(op, grad):
if isinstance(grad, ops.Tensor):
grad_ctxt.AddName(grad.name)
else:
if not isinstance(grad, (ops.IndexedSlices, ops.SparseTensor)):
if not isinstance(grad, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(grad))
grad_ctxt.AddName(grad.values.name)
grad_ctxt.AddName(grad.indices.name)
......
......@@ -80,6 +80,7 @@ from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
......@@ -165,7 +166,7 @@ def _Identity(data, name=None):
else:
return array_ops.identity(data, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, ops.SparseTensor)):
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _Identity(data.values, name=name)
indices = array_ops.identity(data.indices, name="indices")
......@@ -176,7 +177,7 @@ def _Identity(data, name=None):
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = array_ops.identity(data.shape, name="dense_shape")
return ops.SparseTensor(indices, values, dense_shape)
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def _NextIteration(data, name=None):
......@@ -187,7 +188,7 @@ def _NextIteration(data, name=None):
else:
return next_iteration(data, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, ops.SparseTensor)):
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _NextIteration(data.values, name=name)
indices = next_iteration(data.indices, name="indices")
......@@ -198,7 +199,7 @@ def _NextIteration(data, name=None):
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = next_iteration(data.shape, name="dense_shape")
return ops.SparseTensor(indices, values, dense_shape)
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def _Enter(data, frame_name, is_constant=False, parallel_iterations=10,
......@@ -233,7 +234,7 @@ def _Enter(data, frame_name, is_constant=False, parallel_iterations=10,
result.set_shape(data.get_shape())
return result
else:
if not isinstance(data, (ops.IndexedSlices, ops.SparseTensor)):
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _Enter(data.values, frame_name, is_constant,
parallel_iterations=parallel_iterations,
......@@ -255,7 +256,7 @@ def _Enter(data, frame_name, is_constant=False, parallel_iterations=10,
parallel_iterations, name="dense_shape")
if use_input_shape:
dense_shape.set_shape(data.shape.get_shape())
return ops.SparseTensor(indices, values, dense_shape)
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def exit(data, name=None):
......@@ -277,7 +278,7 @@ def exit(data, name=None):
else:
return gen_control_flow_ops._exit(data, name)
else:
if not isinstance(data, (ops.IndexedSlices, ops.SparseTensor)):
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = exit(data.values, name=name)
indices = gen_control_flow_ops._exit(data.indices, name="indices")
......@@ -288,7 +289,7 @@ def exit(data, name=None):
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = gen_control_flow_ops._exit(data.shape, name)
return ops.SparseTensor(indices, values, dense_shape)
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def switch(data, pred, dtype=None, name=None):
......@@ -317,7 +318,7 @@ def switch(data, pred, dtype=None, name=None):
if isinstance(data, ops.Tensor):
return gen_control_flow_ops._switch(data, pred, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, ops.SparseTensor)):
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
val, ind = data.values, data.indices
val_f, val_t = gen_control_flow_ops._switch(val, pred, name=name)
......@@ -335,8 +336,8 @@ def switch(data, pred, dtype=None, name=None):
dense_shape = data.shape
dense_shape_f, dense_shape_t = gen_control_flow_ops._switch(
data.shape, pred, name="dense_shape")
return (ops.SparseTensor(ind_f, val_f, dense_shape_f),
ops.SparseTensor(ind_t, val_t, dense_shape_t))
return (sparse_tensor.SparseTensor(ind_f, val_f, dense_shape_f),
sparse_tensor.SparseTensor(ind_t, val_t, dense_shape_t))
def _SwitchRefOrTensor(data, pred, name="Switch"):
......@@ -418,14 +419,15 @@ def merge(inputs, name=None):
return gen_control_flow_ops._ref_merge(inputs, name)
else:
return gen_control_flow_ops._merge(inputs, name)
elif all([isinstance(v, ops.SparseTensor) for v in inputs]):
elif all([isinstance(v, sparse_tensor.SparseTensor) for v in inputs]):
# Only handle the case when all inputs are SparseTensor.
values, _ = merge([inp.values for inp in inputs], name=name)
indices, chosen_index = gen_control_flow_ops._merge(
[inp.indices for inp in inputs], name="indices")
dense_shape, _ = gen_control_flow_ops._merge(
[inp.shape for inp in inputs], name="dense_shape")
return ops.SparseTensor(indices, values, dense_shape), chosen_index
return (sparse_tensor.SparseTensor(indices, values, dense_shape),
chosen_index)
else:
# For now convert all the inputs as IndexedSlices.
inputs = math_ops._as_indexed_slices_list(inputs, optimize=False)
......@@ -533,7 +535,7 @@ def _SetShapeInvariants(input_vars, enter_vars, shapes):
% (inp.name, inp.get_shape(), shape))
var.set_shape(shape)
else:
if not isinstance(var, (ops.IndexedSlices, ops.SparseTensor)):
if not isinstance(var, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(var))
if isinstance(var, ops.IndexedSlices):
if not _ShapeLessThanOrEqual(inp.values.get_shape(), shape):
......@@ -584,7 +586,7 @@ def _EnforceShapeInvariant(merge_var, next_var):
"argument of tf.while_loop or set_shape() on the loop variables."
% (merge_var.name, m_shape, n_shape))
else:
if not isinstance(var, (ops.IndexedSlices, ops.SparseTensor)):
if not isinstance(var, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(var))
if isinstance(var, ops.IndexedSlices):
m_values_shape = merge_var.values.get_shape()
......@@ -645,8 +647,8 @@ def _AddNextAndBackEdge(m, v):
if v.dense_shape is None:
raise ValueError("Must have dense shape: %s" % v.name)
m.dense_shape.op._update_input(1, v.dense_shape)
elif isinstance(m, ops.SparseTensor):
if not isinstance(v, ops.SparseTensor):
elif isinstance(m, sparse_tensor.SparseTensor):
if not isinstance(v, sparse_tensor.SparseTensor):
raise ValueError("Must be a sparse tensor: %s" % v.name)
v = _NextIteration(v)
# pylint: disable=protected-access
......@@ -1687,7 +1689,7 @@ class CondContext(ControlFlowContext):
# Use pivot as the proxy for this op.
real_v = with_dependencies([v], self._pivot)
else:
if isinstance(v, (ops.IndexedSlices, ops.SparseTensor)):
if isinstance(v, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
values = self._ProcessOutputTensor(v.values)
indices = self._ProcessOutputTensor(v.indices)
if isinstance(v, ops.IndexedSlices):
......@@ -1697,7 +1699,7 @@ class CondContext(ControlFlowContext):
real_v = ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = self._ProcessOutputTensor(v.shape)
real_v = ops.SparseTensor(indices, values, dense_shape)
real_v = sparse_tensor.SparseTensor(indices, values, dense_shape)
else:
real_v = self._ProcessOutputTensor(v)
result.append(real_v)
......@@ -1791,8 +1793,8 @@ def cond(pred, fn1, fn2, name=None):
for x, y in zip(res_f, res_t):
assert ((isinstance(x, ops.IndexedSlices) and
isinstance(y, ops.IndexedSlices)) or
(isinstance(x, ops.SparseTensor) and
isinstance(y, ops.SparseTensor)) or
(isinstance(x, sparse_tensor.SparseTensor) and
isinstance(y, sparse_tensor.SparseTensor)) or
(isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor)))
val_x = x if isinstance(x, ops.Tensor) else x.values
val_y = y if isinstance(y, ops.Tensor) else y.values
......@@ -2356,7 +2358,7 @@ class WhileContext(ControlFlowContext):
self._values.add(x.indices.name)
if isinstance(x, ops.IndexedSlices):
dense_shape = x.dense_shape
elif isinstance(x, ops.SparseTensor):
elif isinstance(x, sparse_tensor.SparseTensor):
dense_shape = x.shape
else:
raise TypeError("Type %s not supported" % type(x))
......@@ -2487,7 +2489,7 @@ class WhileContext(ControlFlowContext):
if isinstance(e, ops.Tensor):
xs = [e]
else:
if not isinstance(e, (ops.IndexedSlices, ops.SparseTensor)):
if not isinstance(e, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(e))
xs = [e.values, e.indices]
shape = e.dense_shape if isinstance(e, ops.IndexedSlices) else e.shape
......
......@@ -21,6 +21,7 @@ from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import gen_ctc_ops
from tensorflow.python.ops import array_ops
......@@ -29,7 +30,8 @@ from tensorflow.python.ops.nn_grad import _BroadcastMul
# pylint: disable=protected-access, invalid-name
def ctc_loss(inputs, labels, sequence_length,
preprocess_collapse_repeated=False, ctc_merge_repeated=True, time_major=True):
preprocess_collapse_repeated=False,
ctc_merge_repeated=True, time_major=True):
"""Computes the CTC (Connectionist Temporal Classification) Loss.
This op implements the CTC loss as presented in the article:
......@@ -128,7 +130,7 @@ def ctc_loss(inputs, labels, sequence_length,
"""
# The second, third, etc output tensors contain the gradients. We use it in
# _CTCLossGrad() below.
if not isinstance(labels, ops.SparseTensor):
if not isinstance(labels, sparse_tensor.SparseTensor):
raise TypeError("Expected labels to be a SparseTensor")
# For internal calculations, we transpose to [time, batch, num_classes]
......@@ -206,7 +208,7 @@ def ctc_greedy_decoder(inputs, sequence_length, merge_repeated=True):
outputs = gen_ctc_ops._ctc_greedy_decoder(
inputs, sequence_length, merge_repeated=merge_repeated)
(decoded_ix, decoded_val, decoded_shape, log_probabilities) = outputs
return ([ops.SparseTensor(decoded_ix, decoded_val, decoded_shape)],
return ([sparse_tensor.SparseTensor(decoded_ix, decoded_val, decoded_shape)],
log_probabilities)
......@@ -258,7 +260,7 @@ def ctc_beam_search_decoder(inputs, sequence_length, beam_width=100,
merge_repeated=merge_repeated))
return (
[ops.SparseTensor(ix, val, shape) for (ix, val, shape)
[sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(decoded_ixs, decoded_vals, decoded_shapes)],
log_probabilities)
......
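The type check above means `ctc_loss` callers build their label sequences as `SparseTensor`s of int32 class ids. A hypothetical, minimal sketch (shapes and values invented; it assumes the op is exported as `tf.nn.ctc_loss` and keeps the time-major default shown above):
```python
import numpy as np
import tensorflow as tf

num_classes = 4                  # 3 real labels + 1 blank
time_steps, batch_size = 5, 1

# Unnormalized log-probabilities, time-major: [max_time, batch_size, num_classes].
inputs = tf.constant(
    np.random.randn(time_steps, batch_size, num_classes).astype(np.float32))
# The label sequence [1, 2] for the single batch element, encoded sparsely.
labels = tf.SparseTensor(indices=[[0, 0], [0, 1]], values=[1, 2], shape=[1, 2])
seq_len = tf.constant([time_steps], dtype=tf.int32)

loss = tf.nn.ctc_loss(inputs, labels, seq_len)  # one loss value per batch element

with tf.Session() as sess:
  print(sess.run(loss))
```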
......@@ -23,6 +23,7 @@ from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
......@@ -257,11 +258,11 @@ def embedding_lookup_sparse(params, sp_ids, sp_weights,
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
if not isinstance(sp_ids, ops.SparseTensor):
if not isinstance(sp_ids, sparse_tensor.SparseTensor):
raise TypeError("sp_ids must be SparseTensor")
ignore_weights = sp_weights is None
if not ignore_weights:
if not isinstance(sp_weights, ops.SparseTensor):
if not isinstance(sp_weights, sparse_tensor.SparseTensor):
raise TypeError("sp_weights must be either None or SparseTensor")
sp_ids.values.get_shape().assert_is_compatible_with(
sp_weights.values.get_shape())
......
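Similarly, `embedding_lookup_sparse` expects `sp_ids` (and the optional `sp_weights`) as `SparseTensor`s. A sketch with made-up sizes, assuming the public `tf.nn.embedding_lookup_sparse` wrapper:
```python
import tensorflow as tf

params = tf.constant([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])  # 4 x 2 table

# Two examples: example 0 uses ids {0, 2}, example 1 uses id {3}.
sp_ids = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                         values=tf.constant([0, 2, 3], dtype=tf.int64),
                         shape=[2, 2])

embedded = tf.nn.embedding_lookup_sparse(params, sp_ids, None, combiner="mean")

with tf.Session() as sess:
  print(sess.run(embedded))  # [[2. 2.]   mean of rows 0 and 2
                             #  [4. 4.]]  row 3
```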
......@@ -33,6 +33,7 @@ from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
......@@ -301,7 +302,7 @@ def map_fn(fn, elems, dtype=None, parallel_iterations=10, back_prop=True,
if not callable(fn):
raise TypeError("fn must be callable.")
if isinstance(elems, ops.SparseTensor):
if isinstance(elems, sparse_tensor.SparseTensor):
raise TypeError(
"To perform a map on the values of a sparse tensor use either "
" SparseTensor(input.indices, fn(input.values), input.shape) or "
......
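The error message above spells out the intended pattern: map over the sparse tensor's `values` and re-wrap the result with the original `indices` and `shape`. A small illustrative sketch of that pattern:
```python
import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 4.0], shape=[2, 3])

# Map only over the stored values; the sparsity pattern is reused as-is.
mapped = tf.SparseTensor(sp.indices, tf.map_fn(tf.sqrt, sp.values), sp.shape)

with tf.Session() as sess:
  print(sess.run(mapped.values))  # [1. 2.]
```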
......@@ -217,6 +217,7 @@ from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_control_flow_ops
......@@ -258,13 +259,15 @@ def abs(x, name=None):
values.
"""
with ops.name_scope(name, "Abs", [x]) as name:
if isinstance(x, ops.SparseTensor):
if isinstance(x, sparse_tensor.SparseTensor):
if x.values.dtype in (dtypes.complex64, dtypes.complex128):
x_abs = gen_math_ops.complex_abs(x.values,
Tout=x.values.dtype.real_dtype, name=name)
return ops.SparseTensor(indices=x.indices, values=x_abs, shape=x.shape)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_abs, shape=x.shape)
x_abs = gen_math_ops._abs(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_abs, shape=x.shape)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_abs, shape=x.shape)
else:
x = ops.convert_to_tensor(x, name="x")
if x.dtype in (dtypes.complex64, dtypes.complex128):
......@@ -297,9 +300,10 @@ def neg(x, name=None):
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Neg", [x]) as name:
if isinstance(x, ops.SparseTensor):
if isinstance(x, sparse_tensor.SparseTensor):
x_neg = gen_math_ops.neg(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_neg, shape=x.shape)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_neg, shape=x.shape)
else:
return gen_math_ops.neg(x, name=name)
......@@ -320,9 +324,10 @@ def sign(x, name=None):
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Sign", [x]) as name:
if isinstance(x, ops.SparseTensor):
if isinstance(x, sparse_tensor.SparseTensor):
x_sign = gen_math_ops.sign(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_sign, shape=x.shape)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_sign, shape=x.shape)
else:
return gen_math_ops.sign(x, name=name)
......@@ -341,9 +346,10 @@ def square(x, name=None):
A `Tensor` or `SparseTensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Square", [x]) as name:
if isinstance(x, ops.SparseTensor):
if isinstance(x, sparse_tensor.SparseTensor):
x_square = gen_math_ops.square(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_square, shape=x.shape)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_square, shape=x.shape)
else:
return gen_math_ops.square(x, name=name)
......@@ -362,9 +368,10 @@ def sqrt(x, name=None):
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Sqrt", [x]) as name:
if isinstance(x, ops.SparseTensor):
if isinstance(x, sparse_tensor.SparseTensor):
x_sqrt = gen_math_ops.sqrt(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_sqrt, shape=x.shape)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_sqrt, shape=x.shape)
else:
return gen_math_ops.sqrt(x, name=name)
......@@ -381,9 +388,10 @@ def erf(x, name=None):
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Erf", [x]) as name:
if isinstance(x, ops.SparseTensor):
if isinstance(x, sparse_tensor.SparseTensor):
x_erf = gen_math_ops.erf(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_erf, shape=x.shape)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_erf, shape=x.shape)
else:
return gen_math_ops.erf(x, name=name)
......@@ -624,9 +632,9 @@ def cast(x, dtype, name=None):
"""
base_type = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "Cast", [x]) as name:
if isinstance(x, ops.SparseTensor):
if isinstance(x, sparse_tensor.SparseTensor):
values_cast = cast(x.values, base_type, name=name)
return ops.SparseTensor(x.indices, values_cast, x.shape)
return sparse_tensor.SparseTensor(x.indices, values_cast, x.shape)
else:
# TODO(touts): Handle what Josh said.
#
......@@ -769,14 +777,15 @@ def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
"""
def binary_op_wrapper(x, y):
with ops.name_scope(None, op_name, [x, y]) as name:
if not isinstance(y, ops.SparseTensor):
if not isinstance(y, sparse_tensor.SparseTensor):
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
return func(x, y, name=name)
def binary_op_wrapper_sparse(sp_x, y):
with ops.name_scope(None, op_name, [sp_x, y]) as name:
y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
return ops.SparseTensor(sp_x.indices, func(sp_x.indices, sp_x.values,
return sparse_tensor.SparseTensor(
sp_x.indices, func(sp_x.indices, sp_x.values,
sp_x.shape, y, name=name),
sp_x.shape)
......@@ -934,18 +943,18 @@ def _mul_dispatch(x, y, name=None):
if is_tensor_y:
return gen_math_ops.mul(x, y, name=name)
else:
assert isinstance(y, ops.SparseTensor) # Case: Dense * Sparse.
assert isinstance(y, sparse_tensor.SparseTensor) # Case: Dense * Sparse.
new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
y.shape, x, name)
return ops.SparseTensor(y.indices, new_vals, y.shape)
return sparse_tensor.SparseTensor(y.indices, new_vals, y.shape)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
ops.SparseTensor)
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
ops.SparseTensor)
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
ops.SparseTensor)
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
......@@ -1060,7 +1069,7 @@ def _ReductionDims(x, reduction_indices):
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(np.arange(x.get_shape().ndims),
dtype=dtypes.int32)
if (isinstance(x, ops.SparseTensor) and
if (isinstance(x, sparse_tensor.SparseTensor) and
x.shape.get_shape().is_fully_defined()):
rank = x.shape.get_shape()[0].value  # sparse.shape is a 1-D tensor.
return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
......@@ -1725,9 +1734,10 @@ def tanh(x, name=None):
`x.dtype != qint32` otherwise the return type is `quint8`.
"""
with ops.name_scope(name, "Tanh", [x]) as name:
if isinstance(x, ops.SparseTensor):
if isinstance(x, sparse_tensor.SparseTensor):
x_tanh = gen_math_ops._tanh(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_tanh, shape=x.shape)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_tanh, shape=x.shape)
else:
return gen_math_ops._tanh(x, name=name)
......
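Every sparse-aware unary op touched above (`abs`, `neg`, `sign`, `square`, `sqrt`, `erf`, `cast`, `tanh`) follows the same recipe: transform `values` and re-wrap with the original `indices` and `shape`. A brief illustration through the public wrappers:
```python
import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 0], [1, 1]], values=[4.0, 9.0], shape=[2, 2])

sp_sqrt = tf.sqrt(sp)           # still a SparseTensor; only `values` changes
sp_int = tf.cast(sp, tf.int32)  # dtype of `values` changes, sparsity pattern does not

with tf.Session() as sess:
  print(sess.run(sp_sqrt.values))   # [2. 3.]
  print(sess.run(sp_int.values))    # [4 9]
  print(sess.run(sp_sqrt.indices))  # [[0 0] [1 1]] -- unchanged
```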
......@@ -24,6 +24,7 @@ import re
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
......@@ -406,7 +407,8 @@ def _parse_example_raw(serialized,
(sparse_indices, sparse_values, sparse_shapes, dense_values) = outputs
sparse_tensors = [ops.SparseTensor(ix, val, shape) for (ix, val, shape)
sparse_tensors = [
sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(sparse_indices, sparse_values, sparse_shapes)]
return dict(
......@@ -530,7 +532,7 @@ def _parse_single_example_raw(serialized,
if sparse_keys is not None:
for s in sparse_keys:
s_name = re.sub("[^A-Za-z0-9_.\\-/]", "_", s)
outputs[s] = ops.SparseTensor(
outputs[s] = sparse_tensor.SparseTensor(
array_ops.slice(outputs[s].indices,
[0, 1], [-1, -1], name="Slice_Indices_%s" % s_name),
outputs[s].values,
......@@ -841,13 +843,13 @@ def _parse_single_sequence_example_raw(serialized,
feature_list_sparse_shapes, feature_list_dense_values) = outputs
context_sparse_tensors = [
ops.SparseTensor(ix, val, shape) for (ix, val, shape)
sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(context_sparse_indices,
context_sparse_values,
context_sparse_shapes)]
feature_list_sparse_tensors = [
ops.SparseTensor(ix, val, shape) for (ix, val, shape)
sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(feature_list_sparse_indices,
feature_list_sparse_values,
feature_list_sparse_shapes)]
......
......@@ -19,6 +19,7 @@ from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
......@@ -51,7 +52,8 @@ def _SparseReorderGrad(op, unused_output_indices_grad, output_values_grad):
num_entries = array_ops.shape(input_indices)[0]
entry_indices = math_ops.range(num_entries)
sp_unordered = ops.SparseTensor(input_indices, entry_indices, input_shape)
sp_unordered = sparse_tensor.SparseTensor(
input_indices, entry_indices, input_shape)
sp_ordered = sparse_ops.sparse_reorder(sp_unordered)
inverted_permutation = array_ops.invert_permutation(sp_ordered.values)
......@@ -134,7 +136,7 @@ def _SparseTensorDenseMatMulGrad(op, grad):
Raises:
TypeError: When the two operands don't have the same type.
"""
sp_t = ops.SparseTensor(*op.inputs[:3])
sp_t = sparse_tensor.SparseTensor(*op.inputs[:3])
adj_a = op.get_attr("adjoint_a")
adj_b = op.get_attr("adjoint_b")
......@@ -209,7 +211,7 @@ def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
# indices can repeat after scaling, so we can't use sparse_to_dense().
dy = sparse_ops.sparse_add(
array_ops.zeros_like(y),
ops.SparseTensor(scaled_indices, dy_val, y_shape))
sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))
# (sp_indices, sp_vals, sp_shape, dense)
return (None, dx, None, dy)
......@@ -246,9 +248,9 @@ def _SparseSoftmaxGrad(op, grad):
"""
indices, shape = op.inputs[0], op.inputs[2]
out_vals = op.outputs[0]
sp_output = ops.SparseTensor(indices, out_vals, shape)
sp_grad = ops.SparseTensor(indices, grad, shape)
sp_product = ops.SparseTensor(
sp_output = sparse_tensor.SparseTensor(indices, out_vals, shape)
sp_grad = sparse_tensor.SparseTensor(indices, grad, shape)
sp_product = sparse_tensor.SparseTensor(
indices, sp_output.values * sp_grad.values, shape)
# [..., B, 1], dense.
......
......@@ -62,6 +62,7 @@ import numpy as np
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
......@@ -86,9 +87,9 @@ def _convert_to_sparse_tensor(sp_input):
Raises:
ValueError: if `sp_input` is neither `SparseTensor` nor `SparseTensorValue`.
"""
if isinstance(sp_input, ops.SparseTensorValue):
return ops.SparseTensor.from_value(sp_input)
if not isinstance(sp_input, ops.SparseTensor):
if isinstance(sp_input, sparse_tensor.SparseTensorValue):
return sparse_tensor.SparseTensor.from_value(sp_input)
if not isinstance(sp_input, sparse_tensor.SparseTensor):
raise TypeError("Input must be a SparseTensor.")
return sp_input
......@@ -232,7 +233,7 @@ def sparse_concat(concat_dim, sp_inputs, name=None, expand_nonconcat_dim=False):
output_ind, output_val, output_shape = (gen_sparse_ops._sparse_concat(
inds, vals, shapes, concat_dim, name=name))
return ops.SparseTensor(output_ind, output_val, output_shape)
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
def sparse_add(a, b, thresh=0):
......@@ -284,7 +285,7 @@ def sparse_add(a, b, thresh=0):
Raises:
TypeError: If both `a` and `b` are `Tensor`s. Use `tf.add()` instead.
"""
sparse_classes = (ops.SparseTensor, ops.SparseTensorValue)
sparse_classes = (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)
if not any(isinstance(inp, sparse_classes) for inp in [a, b]):
raise TypeError("At least one input should be SparseTensor; do you mean to"
" use tf.add()?")
......@@ -295,7 +296,7 @@ def sparse_add(a, b, thresh=0):
thresh, dtype=a.values.dtype.real_dtype, name="thresh")
output_ind, output_val, output_shape = (gen_sparse_ops._sparse_add(
a.indices, a.values, a.shape, b.indices, b.values, b.shape, thresh))
return ops.SparseTensor(output_ind, output_val, output_shape)
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
else:
# swap to make `a` the SparseTensor.
if isinstance(b, sparse_classes):
......@@ -329,7 +330,7 @@ def sparse_dense_cwise_add(sp_t, dense_t):
"""
result = gen_sparse_ops.sparse_dense_cwise_add(sp_t.indices, sp_t.values,
sp_t.shape, dense_t)
return ops.SparseTensor(sp_t.indices, result, sp_t.shape)
return sparse_tensor.SparseTensor(sp_t.indices, result, sp_t.shape)
ops.RegisterShape("SparseTensorDenseAdd")(common_shapes.call_cpp_shape_fn)
......@@ -377,7 +378,7 @@ def sparse_reorder(sp_input, name=None):
reordered_ind, reordered_val = (gen_sparse_ops._sparse_reorder(
sp_input.indices, sp_input.values, sp_input.shape, name=name))
return ops.SparseTensor(reordered_ind, reordered_val,
return sparse_tensor.SparseTensor(reordered_ind, reordered_val,
array_ops.identity(sp_input.shape))
......@@ -435,7 +436,8 @@ def sparse_reshape(sp_input, shape, name=None):
reshaped_ind, reshaped_shape = gen_sparse_ops._sparse_reshape(
sp_input.indices, sp_input.shape, shape, name=name)
return ops.SparseTensor(reshaped_ind, array_ops.identity(sp_input.values),
return sparse_tensor.SparseTensor(
reshaped_ind, array_ops.identity(sp_input.values),
reshaped_shape)
......@@ -488,7 +490,8 @@ def sparse_split(split_dim, num_split, sp_input, name=None):
sparse_tensors = []
for i in range(0, num_split):
sparse_tensors.append(
ops.SparseTensor(output_inds[i], output_vals[i], output_shapes[i]))
sparse_tensor.SparseTensor(
output_inds[i], output_vals[i], output_shapes[i]))
return sparse_tensors
......@@ -633,7 +636,7 @@ def sparse_reduce_sum_sparse(sp_input, reduction_axes=None, keep_dims=False):
sp_input.shape, math_ops._ReductionDims(sp_input, reduction_axes),
keep_dims))
return ops.SparseTensor(output_ind, output_val, output_shape)
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
ops.RegisterShape("SparseReduceSumSparse")(common_shapes.call_cpp_shape_fn)
......@@ -741,7 +744,8 @@ def sparse_to_indicator(sp_input, vocab_size, name=None):
with ops.name_scope(name, "SparseToIndicator", [sp_input]) as name:
num_entries = array_ops.shape(sp_input.indices)[0]
new_values = array_ops.fill(array_ops.expand_dims(num_entries, 0), True)
sp_values = ops.SparseTensor(sp_input.indices, new_values, sp_input.shape)
sp_values = sparse_tensor.SparseTensor(
sp_input.indices, new_values, sp_input.shape)
sp_new = sparse_merge(sp_input, sp_values, vocab_size, name)
......@@ -851,7 +855,7 @@ def sparse_merge(sp_ids, sp_values, vocab_size, name=None,
[array_ops.slice(sp_ids.shape, [0], array_ops.expand_dims(rank - 1, 0)),
math_ops.cast(array_ops.pack([vocab_size]), dtypes.int64)])
result = ops.SparseTensor(new_indices, new_values, new_shape)
result = sparse_tensor.SparseTensor(new_indices, new_values, new_shape)
return result if already_sorted else sparse_reorder(result)
......@@ -894,7 +898,7 @@ def sparse_retain(sp_input, to_retain):
where_true = array_ops.reshape(array_ops.where(to_retain), [-1])
new_indices = array_ops.gather(sp_input.indices, where_true)
new_values = array_ops.gather(sp_input.values, where_true)
return ops.SparseTensor(new_indices, new_values,
return sparse_tensor.SparseTensor(new_indices, new_values,
array_ops.identity(sp_input.shape))
......@@ -967,7 +971,7 @@ def sparse_reset_shape(sp_input, new_shape=None):
output_shape_tensor.get_shape().assert_has_rank(1)
output_shape_tensor = math_ops.cast(output_shape_tensor, dtypes.int64)
# For cases when shape is known during graph construction, this catches the
# error before the ops.SparseTensor catches it.
# error before the sparse_tensor.SparseTensor catches it.
output_shape_tensor.get_shape()[0].merge_with(in_shape.get_shape()[0])
# For cases where shape is not known during graph construction.
......@@ -979,7 +983,7 @@ def sparse_reset_shape(sp_input, new_shape=None):
[check_ops.assert_less_equal(in_shape, output_shape_tensor)],
output_shape_tensor)
return ops.SparseTensor(in_indices, in_values, output_shape_tensor)
return sparse_tensor.SparseTensor(in_indices, in_values, output_shape_tensor)
def sparse_fill_empty_rows(sp_input, default_value, name=None):
......@@ -1054,7 +1058,8 @@ def sparse_fill_empty_rows(sp_input, default_value, name=None):
additional_indices])
all_values_unordered = array_ops.concat(0, [sp_input.values,
additional_values])
sp_unordered_output = ops.SparseTensor(all_indices_unordered,
sp_unordered_output = sparse_tensor.SparseTensor(
all_indices_unordered,
all_values_unordered, sp_input.shape)
sp_ordered_output = sparse_reorder(sp_unordered_output)
......@@ -1182,7 +1187,7 @@ def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return ops.SparseTensor(output_indices, output_values, output_shape)
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
ops.RegisterShape("DeserializeManySparse")(common_shapes.call_cpp_shape_fn)
......@@ -1423,7 +1428,8 @@ def sparse_softmax(sp_input, name=None):
[sp_input.indices, sp_input.values]) as name:
out_vals = gen_sparse_ops.sparse_softmax(sp_input.indices, sp_input.values,
sp_input.shape)
return ops.SparseTensor(sp_input.indices, out_vals, sp_input.shape)
return sparse_tensor.SparseTensor(
sp_input.indices, out_vals, sp_input.shape)
ops.RegisterShape("SparseSoftmax")(common_shapes.call_cpp_shape_fn)
......@@ -1436,8 +1442,8 @@ def sparse_maximum(sp_a, sp_b, name=None):
Example:
```python
sp_zero = ops.SparseTensor([[0]], [0], [7])
sp_one = ops.SparseTensor([[1]], [1], [7])
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
res = tf.sparse_maximum(sp_zero, sp_one).eval()
# "res" should be equal to SparseTensor([[0], [1]], [0, 1], [7]).
```
......@@ -1462,7 +1468,7 @@ def sparse_maximum(sp_a, sp_b, name=None):
sp_b.values,
sp_b.shape,
name=name)
return ops.SparseTensor(out_indices, out_values, sp_a.shape)
return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.shape)
def sparse_minimum(sp_a, sp_b, name=None):
......@@ -1472,8 +1478,8 @@ def sparse_minimum(sp_a, sp_b, name=None):
Example:
```python
sp_zero = ops.SparseTensor([[0]], [0], [7])
sp_one = ops.SparseTensor([[1]], [1], [7])
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
res = tf.sparse_minimum(sp_zero, sp_one).eval()
# "res" should be equal to SparseTensor([[0], [1]], [0, 0], [7]).
```
......@@ -1498,7 +1504,7 @@ def sparse_minimum(sp_a, sp_b, name=None):
sp_b.values,
sp_b.shape,
name=name)
return ops.SparseTensor(out_indices, out_values, sp_a.shape)
return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.shape)
ops.RegisterShape("SparseSparseMaximum")(common_shapes.call_cpp_shape_fn)
......@@ -1547,7 +1553,8 @@ def sparse_transpose(sp_input, perm=None, name=None):
array_ops.gather(array_ops.transpose(indices), perm))
dense_shape = sp_input.shape
transposed_dense_shape = array_ops.gather(dense_shape, perm)
transposed_st = ops.SparseTensor(transposed_indices, sp_input.values,
transposed_st = sparse_tensor.SparseTensor(
transposed_indices, sp_input.values,
transposed_dense_shape)
transposed_st = sparse_reorder(transposed_st)
return transposed_st
......@@ -1691,7 +1698,7 @@ def _take_many_sparse_from_tensors_map(
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return ops.SparseTensor(output_indices, output_values, output_shape)
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
ops.RegisterShape("AddSparseToTensorsMap")(common_shapes.call_cpp_shape_fn)
......
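As a usage note for the helpers in this file, `sparse_add` is the one whose return type depends on its operands: sparse + sparse yields a `SparseTensor`, sparse + dense yields a dense `Tensor`. An illustrative sketch via the public `tf.sparse_add`:
```python
import tensorflow as tf

sp_a = tf.SparseTensor(indices=[[0, 0], [1, 1]], values=[1.0, 2.0], shape=[2, 2])
sp_b = tf.SparseTensor(indices=[[0, 0], [0, 1]], values=[3.0, 4.0], shape=[2, 2])
dense = tf.ones([2, 2])

sp_sum = tf.sparse_add(sp_a, sp_b)      # SparseTensor over the union of indices
dense_sum = tf.sparse_add(sp_a, dense)  # ordinary dense Tensor

with tf.Session() as sess:
  print(sess.run(sp_sum.values))  # [4. 4. 2.]
  print(sess.run(dense_sum))      # [[2. 1.] [1. 3.]]
```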
......@@ -50,6 +50,7 @@ import six
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
# pylint: disable=unused-import
from tensorflow.python.ops import gen_string_ops
......@@ -107,7 +108,7 @@ def string_split(source, delimiter=" "): # pylint: disable=invalid-name
indices.set_shape([None, 2])
values.set_shape([None])
shape.set_shape([2])
return ops.SparseTensor(indices, values, shape)
return sparse_tensor.SparseTensor(indices, values, shape)
ops.NotDifferentiable("StringToHashBucket")
......
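`string_split` is one of the simpler producers of the new class: it tokenizes a batch of strings into a ragged result encoded as a `SparseTensor`. A hedged sketch, assuming the function is exported as `tf.string_split` at this revision (otherwise it is reachable via `tensorflow.python.ops.string_ops.string_split`):
```python
import tensorflow as tf

tokens = tf.string_split(["hello brave new world", "tensorflow"])  # default delimiter " "

with tf.Session() as sess:
  result = sess.run(tokens)  # fetched as a SparseTensorValue
  print(result.indices)      # [[0 0] [0 1] [0 2] [0 3] [1 0]]
  print(result.values)       # [b'hello' b'brave' b'new' b'world' b'tensorflow']
  print(result.shape)        # [2 4]
```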
......@@ -31,6 +31,7 @@ from tensorflow.python import summary
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
......@@ -407,7 +408,7 @@ def _store_sparse_tensors(tensor_list, enqueue_many, shared_map_ops=None):
maybe_shared_map_ops = shared_map_ops or [None] * len(tensor_list)
def _sparse_meta_data(t, storing_op, map_op):
if not isinstance(t, ops.SparseTensor):
if not isinstance(t, sparse_tensor.SparseTensor):
return _SparseMetaData(False, None, None)
rank = t.shape.get_shape().with_rank(1)[0]
if enqueue_many:
......@@ -418,7 +419,7 @@ def _store_sparse_tensors(tensor_list, enqueue_many, shared_map_ops=None):
sparse=True, map_op=map_op or storing_op, rank=rank)
def _maybe_store(t, shared_map_op):
if not isinstance(t, ops.SparseTensor):
if not isinstance(t, sparse_tensor.SparseTensor):
return t
map_op_name = shared_map_op.name if shared_map_op else None
return (_store_many_sparse(t, shared_name=map_op_name) if enqueue_many
......