Merge changes from github.
END_PUBLIC

---
Commit c2b8927 authored by Dandelion Mané <[email protected]>
Committed by TensorFlower Gardener <[email protected]>:
Fix another d3v4 regression in the graph visualizer.

PiperOrigin-RevId: 156343038

---
Commit 170f0b3 authored by Peter Hawkins <[email protected]>
Committed by TensorFlower Gardener <[email protected]>:
[TF:XLA] Add XLA implementation of ResourceStridedSliceAssign.

PiperOrigin-RevId: 156341053

---
Commit 1390dd6 authored by Vijay Vasudevan <[email protected]>
Committed by TensorFlower Gardener <[email protected]>:
When an Op type is not registered, log the hostname of the machine it is
running on in the error message, since the message may be routed back from a
failure in a remote binary and it is otherwise hard to tell which machine it
came from.

Ideally, we'd log the name of the running binary instead, but we don't have a
function to get that right now.

PiperOrigin-RevId: 156337679
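
For illustration only, a minimal sketch of what such an error could look like, assuming tensorflow::port::Hostname() from tensorflow/core/platform/host_info.h and the variadic errors::NotFound() helper; the actual call site and wording of this change may differ:

// Hypothetical helper, not the code touched by this commit.
#include <string>

#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/host_info.h"

// Builds an "Op type not registered" error that names the host it was produced
// on, so a message forwarded from a remote worker is traceable to a machine.
tensorflow::Status OpTypeNotRegisteredError(const std::string& op_type) {
  return tensorflow::errors::NotFound(
      "Op type not registered '", op_type, "' on host ",
      tensorflow::port::Hostname(),
      ". Make sure the Op and Kernel are registered in the binary running in "
      "this process.");
}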

---
Commit 9ca8a15 authored by A. Unique TensorFlower <[email protected]>
Committed by TensorFlower Gardener <[email protected]>:
Internal change.

PiperOrigin-RevId: 156335942

---
Commit 4025543 authored by Martin Wicke <[email protected]>
Committed by TensorFlower Gardener <[email protected]>:
Deprecate contrib/learn/dataframe. To be removed June 15.

PiperOrigin-RevId: 156333930

---
Commit 7f71b7f authored by A. Unique TensorFlower <[email protected]>
Committed by TensorFlower Gardener <[email protected]>:
BEGIN_PUBLIC
Automated g4 rollback of changelist 156123287

PiperOrigin-RevId: 156503903
tensorflower-gardener committed May 19, 2017
1 parent c311af0 commit 53cb26d
Showing 123 changed files with 1,973 additions and 502 deletions.
63 changes: 63 additions & 0 deletions tensorflow/c/generate-pc.sh
@@ -0,0 +1,63 @@
#!/usr/bin/env bash
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

TF_PREFIX='/usr/local'

usage() {
  echo "Usage: $0 OPTIONS"
  echo -e "-p, --prefix\tset installation prefix (default: /usr/local)"
  echo -e "-v, --version\tset TensorFlow version"
  echo -e "-h, --help\tdisplay this message"
}

# read the options
ARGS=`getopt -o p:v:h --long prefix:,version:,help -n $0 -- "$@"`
eval set -- "$ARGS"

# extract options and their arguments into variables.
while true ; do
  case "$1" in
    -h|--help) usage ; exit ;;
    -p|--prefix)
      case "$2" in
        "") shift 2 ;;
        *) TF_PREFIX=$2 ; shift 2 ;;
      esac ;;
    -v|--version)
      case "$2" in
        "") shift 2 ;;
        *) TF_VERSION=$2 ; shift 2 ;;
      esac ;;
    --) shift ; break ;;
    *) echo "Internal error! Try '$0 --help' for more information." ; exit 1 ;;
  esac
done

echo "Generating pkgconfig file for TensorFlow $TF_VERSION in $TF_PREFIX"

cat << EOF > tensorflow.pc
prefix=${TF_PREFIX}
exec_prefix=\${prefix}
libdir=\${exec_prefix}/lib
includedir=\${prefix}/include
Name: TensorFlow
Version: ${TF_VERSION}
Description: Library for computation using data flow graphs for scalable machine learning
Requires:
Libs: -L\${libdir} -ltensorflow
Cflags: -I\${includedir}
EOF
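
As a quick usage sketch (assuming the generated tensorflow.pc is installed somewhere on PKG_CONFIG_PATH, and that libtensorflow plus the C API headers live under the chosen prefix), a program can then be compiled against the library via pkg-config:

// demo.cc -- build with: g++ demo.cc $(pkg-config --cflags --libs tensorflow) -o demo
#include <cstdio>

#include "tensorflow/c/c_api.h"

int main() {
  // TF_Version() is exported by libtensorflow via the C API.
  std::printf("Linked against TensorFlow %s\n", TF_Version());
  return 0;
}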
2 changes: 1 addition & 1 deletion tensorflow/cc/framework/gradients_test.cc
@@ -260,7 +260,7 @@ TEST_F(GradientsTest, StackUnstack_StopBackprop) {
}

TEST_F(GradientsTest, DependentGradOutputs) {
-// Tests that dependant gradients (in this case the gradients w.r.t to the
+// Tests that dependent gradients (in this case the gradients w.r.t to the
// output and one input of MatMul) are computed properly.

// Create two chained MatMul ops.
2 changes: 1 addition & 1 deletion tensorflow/cc/saved_model/loader.cc
@@ -36,7 +36,7 @@ auto* load_attempt_count = monitoring::Counter<2>::New(
"status");
auto* load_latency = monitoring::Counter<1>::New(
"/tensorflow/cc/saved_model/load_latency",
"Latency in microseconds for SavedModels that were succesfully loaded.",
"Latency in microseconds for SavedModels that were successfully loaded.",
"model_path");
constexpr char kLoadAttemptFail[] = "fail";
constexpr char kLoadAttemptSuccess[] = "success";
2 changes: 1 addition & 1 deletion tensorflow/compiler/aot/codegen.cc
@@ -365,7 +365,7 @@ Status GenerateHeader(const HeaderOpts& opts, const Config& config,
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
-namespace Eigen { class ThreadPoolDevice; }
+namespace Eigen { struct ThreadPoolDevice; }
// (Implementation detail) Entry point to the function in the object file.
extern "C" void {{ENTRY}}(
2 changes: 1 addition & 1 deletion tensorflow/compiler/aot/codegen_test_h.golden
@@ -15,7 +15,7 @@
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"

-namespace Eigen { class ThreadPoolDevice; }
+namespace Eigen { struct ThreadPoolDevice; }

// (Implementation detail) Entry point to the function in the object file.
extern "C" void entry_point(
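
The class-to-struct change in these forward declarations matters mainly for MSVC, which emits warning C4099 when a forward declaration's class-key differs from the definition's; Eigen defines ThreadPoolDevice as a struct. A minimal illustration with a hypothetical function, not part of the diff:

namespace Eigen { struct ThreadPoolDevice; }    // matches Eigen's definition
// namespace Eigen { class ThreadPoolDevice; }  // MSVC: warning C4099 (mismatched class-key)

// The forward declaration is enough to pass pointers or references around:
void RunOnDevice(const Eigen::ThreadPoolDevice* device);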
10 changes: 9 additions & 1 deletion tensorflow/compiler/aot/runtime.cc
@@ -31,6 +31,8 @@ namespace {
inline void* aligned_malloc(size_t size, int minimum_alignment) {
#if defined(__ANDROID__) || defined(OS_ANDROID) || defined(OS_CYGWIN)
  return memalign(minimum_alignment, size);
#elif defined(COMPILER_MSVC)
  return _aligned_malloc(size, minimum_alignment);
#else // !__ANDROID__ && !OS_ANDROID && !OS_CYGWIN
  void* ptr = nullptr;
  // posix_memalign requires that the requested alignment be at least
@@ -45,7 +47,13 @@ inline void* aligned_malloc(size_t size, int minimum_alignment) {
#endif
}

-inline void aligned_free(void* aligned_memory) { free(aligned_memory); }
+inline void aligned_free(void* aligned_memory) {
+#if defined(COMPILER_MSVC)
+  _aligned_free(aligned_memory);
+#else
+  free(aligned_memory);
+#endif
+}

size_t align_to(size_t n, size_t align) {
return (((n - 1) / align) + 1) * align;
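
The reason frees are routed through aligned_free is that memory obtained from _aligned_malloc on MSVC must be released with _aligned_free rather than free. A small self-contained sketch of the same pattern (an illustrative reimplementation, not the tfcompile runtime itself):

#include <cstddef>
#include <cstdlib>
#if defined(_MSC_VER)
#include <malloc.h>  // _aligned_malloc / _aligned_free
#endif

// Allocate `size` bytes aligned to `alignment` (a power of two >= sizeof(void*)),
// and always release through aligned_free so the MSVC and POSIX paths agree.
void* aligned_malloc(std::size_t size, std::size_t alignment) {
#if defined(_MSC_VER)
  return _aligned_malloc(size, alignment);
#else
  void* ptr = nullptr;
  if (posix_memalign(&ptr, alignment, size) != 0) return nullptr;
  return ptr;
#endif
}

void aligned_free(void* p) {
#if defined(_MSC_VER)
  _aligned_free(p);
#else
  free(p);
#endif
}

int main() {
  void* buf = aligned_malloc(1024, 64);
  // ... use buf ...
  aligned_free(buf);
  return 0;
}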
13 changes: 13 additions & 0 deletions tensorflow/compiler/tests/BUILD
@@ -170,6 +170,19 @@ tf_xla_py_test(
],
)

tf_xla_py_test(
    name = "slice_ops_test",
    size = "small",
    srcs = ["slice_ops_test.py"],
    deps = [
        ":xla_test",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:data_flow_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:platform_test",
    ],
)

tf_xla_py_test(
    name = "function_test",
    size = "small",
132 changes: 132 additions & 0 deletions tensorflow/compiler/tests/slice_ops_test.py
@@ -0,0 +1,132 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slicing."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest


class SliceTest(XLATestCase):

  def test1D(self):
    for dtype in self.numeric_types:
      with self.test_session():
        i = array_ops.placeholder(dtype, shape=[10])
        with self.test_scope():
          o = array_ops.slice(i, [2], [4])
        params = {
            i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        }
        result = o.eval(feed_dict=params)

        self.assertAllEqual([2, 3, 4, 5], result)

  def test3D(self):
    for dtype in self.numeric_types:
      with self.test_session():
        i = array_ops.placeholder(dtype, shape=[3, 3, 10])
        with self.test_scope():
          o = array_ops.slice(i, [1, 2, 2], [1, 1, 4])
        params = {
            i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
                 [5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
                [[5, 5, 5, 5, 5, 5, 5, 5, 5, 5], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                 [8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
                [[7, 5, 7, 5, 7, 5, 7, 5, 7, 5], [1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
                 [9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]]
        }
        result = o.eval(feed_dict=params)

        self.assertAllEqual([[[6, 5, 4, 3]]], result)


class StridedSliceTest(XLATestCase):

  def test1D(self):
    for dtype in self.numeric_types:
      with self.test_session():
        i = array_ops.placeholder(dtype, shape=[10])
        with self.test_scope():
          o = array_ops.strided_slice(i, [2], [6], [2])
        params = {
            i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        }
        result = o.eval(feed_dict=params)

        self.assertAllEqual([2, 4], result)

  def test1DNegativeStride(self):
    for dtype in self.numeric_types:
      with self.test_session():
        i = array_ops.placeholder(dtype, shape=[10])
        with self.test_scope():
          o = array_ops.strided_slice(i, [6], [2], [-2])
        params = {
            i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        }
        result = o.eval(feed_dict=params)

        self.assertAllEqual([6, 4], result)

  def test3D(self):
    for dtype in self.numeric_types:
      with self.test_session():
        i = array_ops.placeholder(dtype, shape=[3, 3, 10])
        with self.test_scope():
          o = array_ops.strided_slice(i, [0, 2, 2], [2, 3, 6], [1, 1, 2])
        params = {
            i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
                 [5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
                [[5, 5, 5, 5, 5, 5, 5, 5, 5, 5], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                 [8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
                [[7, 5, 7, 5, 7, 5, 7, 5, 7, 5], [1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
                 [9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]]
        }
        result = o.eval(feed_dict=params)

        self.assertAllEqual([[[1, 9]], [[6, 4]]], result)

  def test3DNegativeStride(self):
    for dtype in self.numeric_types:
      with self.test_session():
        i = array_ops.placeholder(dtype, shape=[3, 4, 10])
        with self.test_scope():
          o = array_ops.strided_slice(i, [2, 2, 6], [0, 0, 2], [-1, -1, -2])
        params = {
            i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
                 [5, 3, 1, 7, 9, 2, 4, 6, 8, 0], [4, 5, 2, 4, 3, 7, 6, 8, 9, 4]],
                [[5, 5, 5, 5, 5, 5, 5, 5, 5, 5], [4, 3, 4, 5, 7, 6, 5, 3, 4, 5],
                 [8, 7, 6, 5, 4, 3, 2, 1, 8, 7], [7, 1, 7, 1, 8, 1, 8, 1, 3, 1]],
                [[7, 5, 7, 5, 7, 5, 7, 5, 7, 5], [1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
                 [9, 8, 7, 9, 8, 7, 9, 8, 7, 9], [9, 9, 5, 5, 6, 6, 3, 3, 6, 6]]]
        }
        result = o.eval(feed_dict=params)

        self.assertAllEqual([[[9, 8], [1, 1]], [[2, 4], [5, 7]]], result)


if __name__ == "__main__":
  googletest.main()
1 change: 1 addition & 0 deletions tensorflow/compiler/tf2xla/BUILD
@@ -45,6 +45,7 @@ cc_library(
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/client:client_library",
"//tensorflow/compiler/xla/client:computation",
@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/kernels/gather_functor.h"
#include "tensorflow/core/platform/dynamic_annotations.h"
#include "tensorflow/core/platform/macros.h"

namespace tensorflow {

@@ -63,7 +64,6 @@ EIGEN_STRONG_INLINE void gather_float_int32_xla_impl(float* out, void** data) {

// Implements gather on CPU. This is called by an XLA custom call, set up by
// gather_op.cc.
extern "C" void __attribute__((visibility("default")))
gather_float_int32_xla_impl(float* out, void** data) {
extern "C" void TF_EXPORT gather_float_int32_xla_impl(float* out, void** data) {
tensorflow::gather_float_int32_xla_impl(out, data);
}
@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/kernels/gather_functor.h"
#include "tensorflow/core/platform/dynamic_annotations.h"
#include "tensorflow/core/platform/macros.h"

namespace tensorflow {

@@ -63,7 +64,6 @@ EIGEN_STRONG_INLINE void gather_float_int64_xla_impl(float* out, void** data) {

// Implements gather on CPU. This is called by an XLA custom call, set up by
// gather_op.cc.
extern "C" void __attribute__((visibility("default")))
gather_float_int64_xla_impl(float* out, void** data) {
extern "C" void TF_EXPORT gather_float_int64_xla_impl(float* out, void** data) {
tensorflow::gather_float_int64_xla_impl(out, data);
}
@@ -18,6 +18,7 @@ limitations under the License.
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/dynamic_annotations.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {
@@ -43,7 +44,6 @@ EIGEN_STRONG_INLINE void argmax_float_1d_xla_impl(void* out, void** data) {

// Implements argmax on CPU. This is called by an XLA custom call, set up by
// index_ops.cc.
extern "C" void __attribute__((visibility("default")))
argmax_float_1d_xla_impl(void* out, void** data) {
extern "C" void TF_EXPORT argmax_float_1d_xla_impl(void* out, void** data) {
tensorflow::argmax_float_1d_xla_impl(out, data);
}
@@ -18,6 +18,7 @@ limitations under the License.
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/dynamic_annotations.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {
@@ -45,7 +46,6 @@ EIGEN_STRONG_INLINE void argmax_float_2d_xla_impl(void* out, void** data) {

// Implements argmax on CPU. This is called by an XLA custom call, set up by
// index_ops.cc.
extern "C" void __attribute__((visibility("default")))
argmax_float_2d_xla_impl(void* out, void** data) {
extern "C" void TF_EXPORT argmax_float_2d_xla_impl(void* out, void** data) {
tensorflow::argmax_float_2d_xla_impl(out, data);
}
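
These hunks replace the GCC/Clang-only __attribute__((visibility("default"))) with the TF_EXPORT macro from tensorflow/core/platform/macros.h (hence the added include), so the custom-call entry points can also be exported from a Windows DLL. A rough sketch of how such a macro is commonly defined (the exact definition in macros.h may differ):

// Hedged sketch of a cross-platform symbol-export macro, not the verbatim
// TensorFlow definition.
#if defined(COMPILER_MSVC)
#if defined(TF_COMPILE_LIBRARY)
#define TF_EXPORT __declspec(dllexport)  // building the DLL: export the symbol
#else
#define TF_EXPORT __declspec(dllimport)  // consuming the DLL: import the symbol
#endif
#else
#define TF_EXPORT __attribute__((visibility("default")))  // ELF/Mach-O: default visibility
#endif

// Usage, as in the hunks above:
extern "C" void TF_EXPORT my_custom_call_impl(void* out, void** data);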