
Commit b3bd82f

Merge pull request #5 from tensorflow/master
Stay up to date
2 parents 2ee908a + 885861a commit b3bd82f

File tree

433 files changed: +16876 additions, −3631 deletions


CONTRIBUTING.md

Lines changed: 6 additions & 0 deletions
````diff
@@ -200,6 +200,12 @@ There are two ways to run TensorFlow unit tests.
     bazel test ${flags} //tensorflow/python/...
     ```
 
+    For a single component e.g. softmax op:
+
+    ```bash
+    bazel test ${flags} tensorflow/python/kernel_tests:softmax_op_test
+    ```
+
 2.  Using [Docker](https://www.docker.com) and TensorFlow's CI scripts.
 
     ```bash
````

README.md

Lines changed: 1 addition & 1 deletion
```diff
@@ -165,7 +165,7 @@ Container Type | Status | Art
 * [Learn ML with TensorFlow](https://www.tensorflow.org/resources/learn-ml)
 * [TensorFlow Twitter](https://twitter.com/tensorflow)
 * [TensorFlow YouTube](https://www.youtube.com/channel/UC0rqucBdTuFTjJiefW5t-IQ)
-* [TensorFlow Roadmap](https://www.tensorflow.org/model_optimization/guide/roadmap)
+* [TensorFlow model optimization roadmap](https://www.tensorflow.org/model_optimization/guide/roadmap)
 * [TensorFlow White Papers](https://www.tensorflow.org/about/bib)
 * [TensorBoard Visualization Toolkit](https://github.com/tensorflow/tensorboard)
```

RELEASE.md

Lines changed: 10 additions & 1 deletion
```diff
@@ -35,7 +35,16 @@
 * TF Core:
     * Added `tf.saved_model.experimental.TrackableResource`, which allows the
       creation of custom wrapper objects for resource tensors.
-
+    * Added `tf.lookup.experimental.MutableHashTable`, which provides a
+      generic mutable hash table implementation.
+        * Compared to `tf.lookup.experimental.DenseHashTable` this offers
+          lower overall memory usage, and a cleaner API. It does not require
+          specifying a `delete_key` and `empty_key` that cannot be inserted into
+          the table.
+* `tf.data`:
+    * Promoting `tf.data.experimental.get_single_element` API to
+      `tf.data.Dataset.get_single_element` and deprecating the experimental
+      endpoint.
 * `tf.lite`:
     * Fix mean op reference quantization rounding issue.
```
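For context, a minimal Python sketch of the two APIs called out in these notes; specific argument choices here (such as `default_value=-1` and the sample data) are illustrative assumptions, not part of the release notes:

```python
import tensorflow as tf

# MutableHashTable: a generic mutable lookup table. Unlike DenseHashTable,
# no sentinel keys (empty_key/delete_key) need to be reserved up front.
table = tf.lookup.experimental.MutableHashTable(
    key_dtype=tf.string, value_dtype=tf.int64, default_value=-1)
table.insert(tf.constant(["a", "b"]), tf.constant([1, 2], dtype=tf.int64))
print(table.lookup(tf.constant(["a", "missing"])))  # [1, -1]

# get_single_element, promoted from tf.data.experimental to a Dataset method.
ds = tf.data.Dataset.from_tensors(tf.constant([1, 2, 3]))
print(ds.get_single_element())  # tf.Tensor([1 2 3], shape=(3,), dtype=int32)
```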

tensorflow/c/eager/BUILD

Lines changed: 4 additions & 1 deletion
```diff
@@ -672,6 +672,7 @@ tf_cuda_cc_test(
         ":c_api_test_util",
         ":tfe_op_internal",
         ":tfe_tensorhandle_internal",
+        "@com_google_absl//absl/strings",
         "//tensorflow/c:c_test_util",
         "//tensorflow/core:lib",
         "//tensorflow/core:lib_internal",
@@ -680,7 +681,9 @@ tf_cuda_cc_test(
         "//tensorflow/core:test_main",
         "//tensorflow/core/common_runtime/eager:eager_operation",
         "//tensorflow/core/common_runtime/eager:tensor_handle",
-        "@com_google_absl//absl/strings",
+        # copybara:uncomment_begin
+        # "@tf_runtime//backends/cpu:tf_ops_alwayslink",
+        # copybara:uncomment_end
     ],
 )
```

tensorflow/c/eager/gradient_checker.cc

Lines changed: 14 additions & 14 deletions
```diff
@@ -50,11 +50,11 @@ Status RunAndMaybeSum(AbstractContext* ctx, Model forward,
                       absl::Span<AbstractTensorHandle* const> inputs,
                       absl::Span<AbstractTensorHandle*> outputs,
                       bool use_function) {
-  std::vector<AbstractTensorHandle*> model_outputs(1);
+  AbstractTensorHandle* model_outputs[1];
 
   // Run the model.
-  TF_RETURN_IF_ERROR(RunModel(forward, ctx, inputs,
-                              absl::MakeSpan(model_outputs), use_function));
+  TF_RETURN_IF_ERROR(
+      RunModel(forward, ctx, inputs, model_outputs, use_function));
   AbstractTensorHandlePtr model_out(model_outputs[0]);
 
   TF_Tensor* model_out_tensor;
@@ -83,8 +83,8 @@ Status RunAndMaybeSum(AbstractContext* ctx, Model forward,
   }
 
   // Reduce sum the output on all dimensions.
-  TF_RETURN_IF_ERROR(
-      ops::Sum(ctx, model_out.get(), sum_dims.get(), outputs, "sum_output"));
+  TF_RETURN_IF_ERROR(ops::Sum(ctx, model_out.get(), sum_dims.get(), &outputs[0],
+                              "sum_output"));
   return Status::OK();
 }
 // ========================= End Helper Functions==============================
@@ -122,7 +122,7 @@ Status CalcNumericalGrad(AbstractContext* ctx, Model forward,
   // Initialize auxilary data structures.
   vector<float> thetaPlus_data(num_elems);
   vector<float> thetaMinus_data(num_elems);
-  std::vector<AbstractTensorHandle*> f_outputs(1);
+  AbstractTensorHandle* f_outputs[1];
 
   // Numerical Grad Check
   for (int i = 0; i < num_elems; i++) {
@@ -164,25 +164,25 @@ Status CalcNumericalGrad(AbstractContext* ctx, Model forward,
 
   // Get f(theta + eps):
   theta_inputs[input_index] = thetaPlus.get();
-  TF_RETURN_IF_ERROR(RunAndMaybeSum(ctx, forward, theta_inputs,
-                                    absl::MakeSpan(f_outputs), use_function));
+  TF_RETURN_IF_ERROR(
+      RunAndMaybeSum(ctx, forward, theta_inputs, f_outputs, use_function));
   AbstractTensorHandlePtr fPlus(f_outputs[0]);
 
   // Get f(theta - eps):
   theta_inputs[input_index] = thetaMinus.get();
-  TF_RETURN_IF_ERROR(RunAndMaybeSum(ctx, forward, theta_inputs,
-                                    absl::MakeSpan(f_outputs), use_function));
+  TF_RETURN_IF_ERROR(
+      RunAndMaybeSum(ctx, forward, theta_inputs, f_outputs, use_function));
   AbstractTensorHandlePtr fMinus(f_outputs[0]);
 
   // Take Difference of both estimates: (f(theta + eps) - f(theta - eps)).
-  TF_RETURN_IF_ERROR(ops::Sub(ctx, fPlus.get(), fMinus.get(),
-                              absl::MakeSpan(f_outputs), "sub_top"));
+  TF_RETURN_IF_ERROR(
+      ops::Sub(ctx, fPlus.get(), fMinus.get(), f_outputs, "sub_top"));
   AbstractTensorHandlePtr fDiff(f_outputs[0]);
 
   // Calculate using the difference quotient definition:
   // (f(theta + eps) - f(theta - eps)) / (2 * eps).
-  TF_RETURN_IF_ERROR(ops::Div(ctx, fDiff.get(), two_eps.get(),
-                              absl::MakeSpan(f_outputs), "diff_quotient"));
+  TF_RETURN_IF_ERROR(
+      ops::Div(ctx, fDiff.get(), two_eps.get(), f_outputs, "diff_quotient"));
   AbstractTensorHandlePtr diff_quotient(f_outputs[0]);
 
   TF_Tensor* grad_tensor;
```
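The hunks above only change how outputs are passed around; the underlying scheme is still the symmetric difference quotient grad_i ≈ (f(theta + eps·e_i) − f(theta − eps·e_i)) / (2·eps). A minimal NumPy sketch of that idea, independent of the C API and purely for illustration:

```python
import numpy as np

def numerical_grad(f, theta, eps=1e-3):
    """Central-difference estimate of df/dtheta, one element at a time."""
    theta = theta.astype(np.float64)
    grad = np.zeros_like(theta)
    for i in range(theta.size):
        theta_plus, theta_minus = theta.copy(), theta.copy()
        theta_plus.flat[i] += eps   # theta + eps on element i
        theta_minus.flat[i] -= eps  # theta - eps on element i
        # (f(theta + eps) - f(theta - eps)) / (2 * eps)
        grad.flat[i] = (f(theta_plus) - f(theta_minus)) / (2 * eps)
    return grad

# Example: f(x) = sum(x^2) has gradient 2x.
print(numerical_grad(lambda x: np.sum(x * x), np.array([1.0, -2.0, 3.0])))
```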

tensorflow/c/eager/gradient_checker_test.cc

Lines changed: 2 additions & 2 deletions
```diff
@@ -65,15 +65,15 @@ void CompareNumericalAndManualGradients(
 Status MatMulModel(AbstractContext* ctx,
                    absl::Span<AbstractTensorHandle* const> inputs,
                    absl::Span<AbstractTensorHandle*> outputs) {
-  return ops::MatMul(ctx, inputs[0], inputs[1], outputs, "MatMul",
+  return ops::MatMul(ctx, inputs[0], inputs[1], &outputs[0], "MatMul",
                      /*transpose_a=*/false,
                      /*transpose_b=*/false);
 }
 
 Status MulModel(AbstractContext* ctx,
                 absl::Span<AbstractTensorHandle* const> inputs,
                 absl::Span<AbstractTensorHandle*> outputs) {
-  return ops::Mul(ctx, inputs[0], inputs[1], outputs, "Mul");
+  return ops::Mul(ctx, inputs[0], inputs[1], &outputs[0], "Mul");
 }
 
 // TODO(vnvo2409): Add more tests from `python/ops/gradient_checker_v2_test.py`.
```

tensorflow/c/eager/gradients_test.cc

Lines changed: 5 additions & 5 deletions
```diff
@@ -119,11 +119,11 @@ Status RecordOperationWithNullGradientFunctionModel(
     absl::Span<AbstractTensorHandle*> outputs) {
   Tape tape(/*persistent=*/false);
   tape.Watch(inputs[0]);
-  std::vector<AbstractTensorHandle*> neg_outputs(1);
-  TF_RETURN_IF_ERROR(
-      ops::Neg(ctx, inputs[0], absl::MakeSpan(neg_outputs), "Neg"));
-  tape.RecordOperation(inputs, neg_outputs, nullptr, "Neg");
-  return tape.ComputeGradient(ctx, /*targets=*/neg_outputs,
+  AbstractTensorHandle* neg_output;
+  TF_RETURN_IF_ERROR(ops::Neg(ctx, inputs[0], &neg_output, "Neg"));
+  tape.RecordOperation(inputs, {neg_output}, nullptr, "Neg");
+  return tape.ComputeGradient(ctx,
+                              /*targets=*/{neg_output},
                               /*sources=*/inputs,
                               /*output_gradients=*/{}, outputs);
 }
```
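As a point of reference, the model this test records (watch an input, run `Neg`, differentiate) corresponds roughly to the following `tf.GradientTape` snippet in Python; this is an illustrative analogue, not the C API under test:

```python
import tensorflow as tf

x = tf.constant(3.0)
with tf.GradientTape() as tape:
    tape.watch(x)  # mirrors tape.Watch(inputs[0]) in the C++ test
    y = -x         # the Neg op recorded on the tape
print(tape.gradient(y, x).numpy())  # -1.0
```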

tensorflow/c/experimental/gradients/custom_gradient_test.cc

Lines changed: 5 additions & 8 deletions
```diff
@@ -70,20 +70,17 @@ Status ExpWithPassThroughGrad(AbstractContext* ctx,
                               absl::Span<AbstractTensorHandle*> outputs) {
   Tape tape(/*persistent=*/false);
   tape.Watch(inputs[0]);  // Watch x.
-  std::vector<AbstractTensorHandle*> exp_outputs(1);
-  TF_RETURN_IF_ERROR(
-      ops::Exp(ctx, inputs[0], absl::MakeSpan(exp_outputs), "Exp"));
+  AbstractTensorHandle* exp_output;
+  TF_RETURN_IF_ERROR(ops::Exp(ctx, inputs[0], &exp_output, "Exp"));
   std::unique_ptr<GradientFunction> gradient_function(
       new PassThroughGradientFunction);
-  tape.RecordOperation(inputs, exp_outputs, gradient_function.release());
+  tape.RecordOperation(inputs, {exp_output}, gradient_function.release());
   TF_RETURN_IF_ERROR(tape.ComputeGradient(ctx,
-                                          /*targets*/ exp_outputs,
+                                          /*targets*/ {exp_output},
                                           /*sources=*/inputs,
                                           /*output_gradients=*/{},
                                           /*result=*/outputs));
-  for (auto exp_output : exp_outputs) {
-    exp_output->Unref();
-  }
+  exp_output->Unref();
   return Status::OK();
 }
```
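The test above builds `Exp` with a gradient function that forwards incoming gradients through unchanged; a rough Python analogue of that pass-through behavior using `tf.custom_gradient`, for illustration only:

```python
import tensorflow as tf

@tf.custom_gradient
def exp_with_pass_through_grad(x):
    y = tf.exp(x)
    def grad(upstream):
        # Pass the upstream gradient through untouched, as the
        # PassThroughGradientFunction in the C++ test does.
        return upstream
    return y, grad

x = tf.constant(1.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = exp_with_pass_through_grad(x)
print(tape.gradient(y, x).numpy())  # 1.0, not exp(1.0)
```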
