Skip to content
This repository was archived by the owner on May 30, 2019. It is now read-only.

Commit 1692b92

Browse files
committedNov 22, 2017
add tensorflow bindings
1 parent 5e79fda commit 1692b92

13 files changed

+2346
-4
lines changed
 

‎.clang-format

+22
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
---
2+
Language: Cpp
3+
BasedOnStyle: Google
4+
5+
AllowShortFunctionsOnASingleLine: Empty
6+
AllowShortIfStatementsOnASingleLine: false
7+
AllowShortLoopsOnASingleLine: false
8+
AlwaysBreakTemplateDeclarations: true
9+
BinPackArguments: false
10+
BinPackParameters: false
11+
BreakBeforeBraces: Attach
12+
ColumnLimit: 79
13+
Cpp11BracedListStyle: true
14+
DerivePointerAlignment: false
15+
IndentWrappedFunctionNames: true
16+
MaxEmptyLinesToKeep: 1
17+
PointerAlignment: Left
18+
SpaceAfterCStyleCast: true
19+
SpacesInContainerLiterals: false
20+
21+
# Alas, not supported:
22+
# ForceEmptyLineAtEOF: true

‎.gitignore

+43-2
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,43 @@
1-
dist/
2-
website/dist/
1+
*_BASE_*
2+
*_BACKUP_*
3+
*_REMOTE_*
4+
*_LOCAL_*
5+
*.opensdf
6+
*.orig
7+
*.sdf
8+
*.sln
9+
*.suo
10+
*.TMP
11+
*.user
12+
*.VC.db
13+
*.VC.opendb
14+
*.vcxproj
15+
*.vcxproj.filters
16+
*.vcxproj.user
17+
*.vspx
18+
19+
/*.build/
20+
/*.dir/
21+
/*.xcodeproj/
22+
23+
/.vs/
24+
/.vscode/
25+
/bin/
26+
/build/
27+
/CMakeFiles/
28+
/CMakeScripts/
29+
/Debug/
30+
/dist/
31+
/ipch/
32+
/lib/
33+
/MinSizeRel/
34+
/out/
35+
/Release/
36+
/RelWithDebInfo/
37+
/x64/
38+
/website/dist/
39+
/Win32/
40+
41+
/CMakeCache.txt
42+
/cmake_install.cmake
43+
/Makefile

‎.travis.yml

+2
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,10 @@ language: node_js
22
node_js:
33
- '8.8'
44
script:
5+
- npm install .
56
- node ./node_modules/webpack/bin/webpack.js
67
- ./node_modules/typescript/bin/tsc
78
- node dist/backprop_test.js
89
- node dist/tensor_test.js
910
- node dist/util_test.js
11+
- node test-tensorflow-binding.js

‎binding.cc

+303
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,303 @@
1+
#include <assert.h>
2+
#include <stdio.h>
3+
#include <stdlib.h>
4+
#include <string.h>
5+
6+
#include <node_api.h>
7+
8+
#include "libtensorflow/include/c_api.h"
9+
#include "libtensorflow/include/eager_c_api.h"
10+
11+
/* Maximum number of tensor dimensions supported by this binding; sizes the
 * on-stack dims array in tensor_new(). */
static const size_t kMaxDims = 10;

/* Native state attached (via napi_wrap) to each JavaScript Tensor object. */
typedef struct tensor_wrap {
  napi_env env;                        /* Environment the wrap was created in. */
  TF_Tensor* tf_tensor;                /* Owned; freed in tensor_delete(). */
  TFE_TensorHandle* tf_tensor_handle;  /* Owned; freed in tensor_delete(). */
  napi_ref js_typed_array;             /* Strong ref that keeps the backing
                                        * TypedArray alive while tensorflow may
                                        * still read its buffer; cleared by
                                        * release_typed_array(). */
} tensor_wrap_t;
19+
20+
/* Deallocator passed to TF_NewTensor(). TensorFlow invokes it when it no
 * longer needs the tensor's backing buffer; at that point the reference
 * pinning the JavaScript TypedArray can be dropped so the GC may collect it.
 * `data` and `len` describe the buffer and are unused here. */
static void release_typed_array(void* data,
                                size_t len,
                                void* tensor_wrap_ptr) {
  auto tensor_wrap = static_cast<tensor_wrap_t*>(tensor_wrap_ptr);

  puts("release_typed_array"); /* Debug trace. */

  assert(tensor_wrap->js_typed_array != NULL);
  napi_status status =
      napi_delete_reference(tensor_wrap->env, tensor_wrap->js_typed_array);
  assert(status == napi_ok);
  /* Mark the reference as released so tensor_delete() doesn't free it twice. */
  tensor_wrap->js_typed_array = NULL;
}
33+
34+
/* napi finalizer for Tensor objects: runs when the wrapping JavaScript
 * object is garbage collected. Releases the TFE handle, the TF_Tensor and,
 * if tensorflow never called release_typed_array(), the TypedArray
 * reference. `hint` is unused (NULL was passed to napi_wrap). */
static void tensor_delete(napi_env env, void* tensor_wrap_ptr, void* hint) {
  auto tensor_wrap = static_cast<tensor_wrap_t*>(tensor_wrap_ptr);
  napi_status status;

  puts("tensor_delete"); /* Debug trace. */

  /* Fields may be NULL if tensor_new() failed partway through;
   * tensor_wrap_t is value-initialized, so the checks below are safe. */
  if (tensor_wrap->tf_tensor_handle != NULL)
    TFE_DeleteTensorHandle(tensor_wrap->tf_tensor_handle);

  if (tensor_wrap->tf_tensor != NULL)
    TF_DeleteTensor(tensor_wrap->tf_tensor);

  /* At this point, the typed array should no longer be referenced, because
   * tensorflow should have called release_typed_array(). But since it isn't
   * clear what happens when TF_NewTensor() fails, double check here and clean
   * up if necessary. */
  if (tensor_wrap->js_typed_array != NULL) {
    status =
        napi_delete_reference(tensor_wrap->env, tensor_wrap->js_typed_array);
    assert(status == napi_ok);
  }

  delete tensor_wrap;
}
58+
59+
/* Constructor for the JavaScript `Tensor` class.
 *
 * JavaScript signature: new Tensor(typedArray, dims)
 *   typedArray - TypedArray providing the tensor's backing buffer.
 *   dims       - Array of 1..kMaxDims positive integers (the tensor shape);
 *                the product of the dimensions must equal typedArray.length.
 *
 * On success a tensor_wrap_t is attached to `this` and `this` is returned.
 * On failure a TypeError/RangeError/Error is thrown and NULL is returned;
 * cleanup of partially constructed native state is handled by the
 * tensor_delete() finalizer once the JS object is collected. */
static napi_value tensor_new(napi_env env, napi_callback_info info) {
  napi_status napi_status;

  /* Check whether this function is called as a construct call. */
  napi_value js_target;
  napi_status = napi_get_new_target(env, info, &js_target);
  assert(napi_status == napi_ok);
  if (js_target == NULL) {
    napi_throw_type_error(env, "EINVAL", "Function not used as a constructor");
    return NULL;
  }

  /* Fetch JavaScript `this` object and function arguments. */
  size_t argc = 2;
  napi_value args[2];
  napi_value js_this;
  napi_status = napi_get_cb_info(env, info, &argc, args, &js_this, NULL);
  assert(napi_status == napi_ok);

  napi_value js_array = args[0];
  napi_value js_dims = args[1];

  /* Check whether the first argument is a typed array. */
  bool is_typed_array;
  napi_status = napi_is_typedarray(env, js_array, &is_typed_array);
  assert(napi_status == napi_ok);

  if (!is_typed_array) {
    napi_throw_type_error(
        env, "EINVAL", "First argument should be a TypedArray");
    return NULL;
  }

  /* Get information about the typed array. */
  napi_typedarray_type js_array_type;
  size_t js_array_length;
  void* js_array_data;
  napi_status = napi_get_typedarray_info(env,
                                         js_array,
                                         &js_array_type,
                                         &js_array_length,
                                         &js_array_data,
                                         NULL,
                                         NULL);
  assert(napi_status == napi_ok);

  /* Map the TypedArray element type to the tensorflow dtype and the
   * per-element width in bytes. */
  size_t width;
  TF_DataType tf_type;

  switch (js_array_type) {
    case napi_int8_array:
      width = sizeof(int8_t);
      tf_type = TF_INT8;
      break;
    case napi_uint8_array:
    case napi_uint8_clamped_array:
      width = sizeof(uint8_t);
      tf_type = TF_UINT8;
      break;
    case napi_int16_array:
      width = sizeof(int16_t);
      tf_type = TF_INT16;
      break;
    case napi_uint16_array:
      width = sizeof(uint16_t);
      tf_type = TF_UINT16;
      break;
    case napi_int32_array:
      width = sizeof(int32_t);
      tf_type = TF_INT32;
      break;
    case napi_uint32_array:
      width = sizeof(uint32_t);
      tf_type = TF_UINT32;
      /* Bug fix: this break was missing, so Uint32Arrays silently fell
       * through and were treated as TF_FLOAT. */
      break;
    case napi_float32_array:
      width = sizeof(float);
      tf_type = TF_FLOAT;
      break;
    case napi_float64_array:
      width = sizeof(double);
      tf_type = TF_DOUBLE;
      break;
    default:
      napi_throw_type_error(env, "EINVAL", "Unsupported TypedArray type.");
      return NULL;
  }

  /* Build the array containing the dimensions. */
  int64_t dims[kMaxDims];
  uint32_t i, num_dims;
  bool b;
  size_t num_elements = 1;

  napi_status = napi_is_array(env, js_dims, &b);
  assert(napi_status == napi_ok);
  if (!b) {
    napi_throw_range_error(
        env, "EINVAL", "Second argument should be an Array");
    return NULL;
  }

  napi_status = napi_get_array_length(env, js_dims, &num_dims);
  assert(napi_status == napi_ok);
  if (num_dims < 1 || num_dims > kMaxDims) {
    napi_throw_range_error(env, "ERANGE", "Invalid number of dimensions");
    return NULL;
  }

  for (i = 0; i < num_dims; i++) {
    napi_value element;
    int64_t value;

    napi_status = napi_get_element(env, js_dims, i, &element);
    assert(napi_status == napi_ok);

    napi_status = napi_get_value_int64(env, element, &value);
    if (napi_status == napi_number_expected) {
      napi_throw_range_error(
          env, "ERANGE", "Dimension size should be a number");
      return NULL;
    } else if (value <= 0) {
      napi_throw_range_error(env, "ERANGE", "Dimension size out of range");
      return NULL;
    }
    assert(napi_status == napi_ok);

    dims[i] = value;
    num_elements *= (size_t) value;
  }

  /* Robustness fix: reject shapes that don't match the amount of data
   * instead of handing tensorflow a buffer of the wrong size. */
  if (num_elements != js_array_length) {
    napi_throw_range_error(
        env, "ERANGE", "TypedArray length does not match dimensions");
    return NULL;
  }

  /* Construct the native wrap object. Value initialization ('()') zeroes
   * all fields, which tensor_delete() relies on when cleaning up after a
   * partial construction. (The NULL check is defensive only; plain operator
   * new throws on allocation failure.) */
  tensor_wrap_t* tensor_wrap = new tensor_wrap_t();
  if (tensor_wrap == NULL) {
    napi_throw_error(env, "ENOMEM", "Out of memory");
    return NULL;
  }

  /* Attach native wrapper to the JavaScript object. */
  tensor_wrap->env = env;
  napi_status =
      napi_wrap(env, js_this, tensor_wrap, tensor_delete, NULL, NULL);
  assert(napi_status == napi_ok);

  /* Store a TypedArray reference in the native wrapper. This must be done
   * before calling TF_NewTensor, because TF_NewTensor might recursively invoke
   * the release_typed_array function that clears the reference. */
  napi_status =
      napi_create_reference(env, js_array, 1, &tensor_wrap->js_typed_array);
  assert(napi_status == napi_ok);

  /* Construct the TF_Tensor object, borrowing the TypedArray's buffer. */
  size_t byte_length = js_array_length * width;
  TF_Tensor* tf_tensor = TF_NewTensor(tf_type,
                                      dims,
                                      num_dims,
                                      js_array_data,
                                      byte_length,
                                      release_typed_array,
                                      tensor_wrap);
  if (tf_tensor == NULL) {
    napi_throw_error(env, "ENOMEM", "Out of memory");
    return NULL;
  }
  tensor_wrap->tf_tensor = tf_tensor;

  /* Create the TFE_TensorHandle object. */
  TF_Status* tf_status = TF_NewStatus();
  if (tf_status == NULL) {
    napi_throw_error(env, "ENOMEM", "Out of memory");
    return NULL;
  }
  TFE_TensorHandle* tf_tensor_handle =
      TFE_NewTensorHandle(tf_tensor, tf_status);
  if (TF_GetCode(tf_status) != TF_OK) {
    napi_throw_error(env, NULL, TF_Message(tf_status));
    TF_DeleteStatus(tf_status);
    return NULL;
  }
  TF_DeleteStatus(tf_status);
  tensor_wrap->tf_tensor_handle = tf_tensor_handle;

  return js_this;
}
241+
242+
/* Getter backing Tensor.prototype.device: returns the name of the device
 * the underlying TFE_TensorHandle is placed on, as a JavaScript string. */
static napi_value tensor_get_device(napi_env env, napi_callback_info info) {
  napi_status status;

  /* Retrieve the `this` object the getter was invoked on. */
  napi_value js_this;
  status = napi_get_cb_info(env, info, NULL, NULL, &js_this, NULL);
  assert(status == napi_ok);

  /* Recover the native wrapper attached by tensor_new(). */
  tensor_wrap_t* wrap;
  status = napi_unwrap(env, js_this, reinterpret_cast<void**>(&wrap));
  assert(status == napi_ok);

  /* Ask tensorflow for the device name, then hand it back to JavaScript
   * as a UTF-8 string. */
  const char* device_name = TFE_TensorHandleDeviceName(wrap->tf_tensor_handle);

  napi_value result;
  status =
      napi_create_string_utf8(env, device_name, NAPI_AUTO_LENGTH, &result);
  assert(status == napi_ok);

  return result;
}
268+
269+
/* Module initializer: defines the JavaScript Tensor class and attaches it
 * to the module's exports object. */
static napi_value init(napi_env env, napi_value exports) {
  napi_status status;

  /* Define the Tensor JavaScript class. */
  napi_value tensor_class;
  /* A single property: a read-only "device" accessor backed by
   * tensor_get_device(). */
  napi_property_descriptor tensor_properties[] = {{"device",
                                                   NULL,
                                                   NULL,
                                                   tensor_get_device,
                                                   NULL,
                                                   NULL,
                                                   napi_default,
                                                   NULL}};
  status = napi_define_class(
      env,
      "Tensor",           /* JavaScript class name */
      NAPI_AUTO_LENGTH,   /* JavaScript class name length */
      tensor_new,         /* Constructor */
      NULL,               /* Constructor argument */
      1,                  /* Property count */
      tensor_properties,  /* Property descriptors */
      &tensor_class);     /* Out, JavaScript value representing the class */

  assert(status == napi_ok);

  /* Stick the Tensor class onto the exports object. */
  napi_property_descriptor descriptor = {
      "Tensor", NULL, NULL, NULL, NULL, tensor_class, napi_default, NULL};
  status = napi_define_properties(env, exports, 1, &descriptor);
  assert(status == napi_ok);

  return exports;
}

NAPI_MODULE(tensorflow_binding, init)

‎binding.gyp

+87
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
{
2+
'variables': {
3+
'tensorflow_include_dir': '<(module_root_dir)/libtensorflow/include',
4+
'tensorflow_headers': [
5+
'<@(tensorflow_include_dir)/c_api.h',
6+
'<@(tensorflow_include_dir)/eager_c_api.h'
7+
]
8+
},
9+
'targets': [
10+
{
11+
'target_name': 'tensorflow-binding',
12+
'sources': [ 'binding.cc' ],
13+
'conditions': [
14+
['OS=="win"', {
15+
'defines': [ 'COMPILER_MSVC' ],
16+
'libraries': [ 'tensorflow' ],
17+
'library_dirs': [ '<(INTERMEDIATE_DIR)' ],
18+
'actions': [
19+
{
20+
'action_name': 'generate-def',
21+
'inputs': [
22+
'<(module_root_dir)/libtensorflow/generate-def.js',
23+
'<@(tensorflow_headers)'
24+
],
25+
'outputs': [
26+
'<(INTERMEDIATE_DIR)/tensorflow.def'
27+
],
28+
'action': [
29+
'cmd',
30+
'/c node <@(_inputs) > <@(_outputs)'
31+
]
32+
},
33+
{
34+
'action_name': 'build-tensorflow-lib',
35+
'inputs': [
36+
'<(INTERMEDIATE_DIR)/tensorflow.def'
37+
],
38+
'outputs': [
39+
'<(INTERMEDIATE_DIR)/tensorflow.lib'
40+
],
41+
'action': [
42+
'lib',
43+
'/def:<@(_inputs)',
44+
'/out:<@(_outputs)',
45+
'/machine:<@(target_arch)'
46+
]
47+
},
48+
{
49+
'action_name': 'download-dll',
50+
'inputs': [
51+
'<(module_root_dir)/libtensorflow/download-dll.js'
52+
],
53+
'outputs': [
54+
'<(PRODUCT_DIR)/tensorflow.dll'
55+
],
56+
'action': [
57+
'node',
58+
'<@(_inputs)',
59+
'<(PRODUCT_DIR)'
60+
]
61+
}
62+
],
63+
}, { # Linux or OS X
64+
'libraries': [ '-Wl,-rpath,\$$ORIGIN', '-ltensorflow' ],
65+
'library_dirs': [ '<(PRODUCT_DIR)' ],
66+
'actions': [
67+
{
68+
'action_name': 'download-so',
69+
'inputs': [
70+
'<(module_root_dir)/libtensorflow/download-so.js'
71+
],
72+
'outputs': [
73+
'<(PRODUCT_DIR)/libtensorflow.so',
74+
'<(PRODUCT_DIR)/libtensorflow_framework.so'
75+
],
76+
'action': [
77+
'node',
78+
'<@(_inputs)',
79+
'<(PRODUCT_DIR)'
80+
]
81+
}
82+
]
83+
}]
84+
]
85+
}
86+
]
87+
}

‎libtensorflow/README.md

+21
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
Given that there are no usable libtensorflow distributions, the
2+
required files are obtained as follows:
3+
4+
* `libtensorflow.so` and `libtensorflow_framework.so`, which are used
5+
on MacOS and Linux, are downloaded and extracted from the Tensorflow
6+
CI server. This is done by `download-so.js`.
7+
8+
* `tensorflow.dll`, on Windows, is downloaded from the CI by
9+
`download-dll.js`.
10+
11+
* `tensorflow.lib` isn't included in the CI download, so it's generated
12+
from the header files. This is a two-step process:
13+
- `tensorflow.def` is generated from the header files by
14+
`generate-def.js`.
15+
- `tensorflow.lib` is generated from the .def file by the Windows
16+
linker; the build rule is located in the project's `binding.gyp`.
17+
18+
* The header files located in `include/` are copied manually from the
19+
tensorflow source tree. They are checked into the git repository.
20+
Using the (CI) distribution isn't possible, because the headers for
21+
the Tensorflow Eager API aren't included.

‎libtensorflow/download-dll.js

+56
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
2+
/* Download libtensorflow binaries mirrored from the TensorFlow CI.
 * Original URL:
 * http://ci.tensorflow.org/view/Nightly/job/nightly-libtensorflow-windows/lastStableBuild/artifact/lib_package/libtensorflow-cpu-windows-x86_64.zip
 */
let downloadUrl = 'http://propelml.org/libtensorflow_20171121/libtensorflow-cpu-windows-x86_64.zip';

// Work around a bug where node-gyp on windows adds a trailing " to PRODUCT_DIR.
let outDir = process.argv[2] || '';
outDir = outDir.replace(/"$/, '');

const fs = require('fs');
const http = require('http');
const path = require('path');
const yauzl = require('yauzl');

fetch(downloadUrl);

// Download `url`, then extract every .dll found in the zip into outDir.
// NOTE(review): the 502 retry below has no attempt cap — a persistently
// failing mirror would recurse forever; verify that's acceptable.
function fetch(url) {
  console.error('Downloading %s', url);

  http.get(url, (res) => {
    if (res.statusCode === 502)
      return fetch(url); // Tensorflow CI server can be very flaky at times.
    else if (res.statusCode !== 200)
      throw new Error("Download failed: HTTP " + res.statusCode + "\n" + url);

    // Buffer the whole response in memory; yauzl needs random access.
    let buffers = [];
    res.on('data', (buf) => buffers.push(buf));

    res.on('end', () => {
      yauzl.fromBuffer(Buffer.concat(buffers), (err, zip) => {
        if (err)
          throw err;

        zip.on('entry', (entry) => {
          let name = path.basename(entry.fileName);
          let ext = path.extname(name);

          // Only the DLLs are needed; skip everything else in the archive.
          if (ext !== '.dll')
            return;

          console.error('Extracting %s', name);

          zip.openReadStream(entry, (err, stream) => {
            if (err)
              throw err;

            let outPath = path.resolve(outDir, name);
            stream.pipe(fs.createWriteStream(outPath));
          });
        });
      });
    });
  });
}

‎libtensorflow/download-so.js

+47
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
2+
/* Download libtensorflow binaries mirrored from the TensorFlow CI.
 * Original URLs:
 * http://ci.tensorflow.org/view/Nightly/job/nightly-libtensorflow/TYPE=mac-slave/lastSuccessfulBuild/artifact/lib_package/libtensorflow-cpu-darwin-x86_64.tar.gz
 * http://ci.tensorflow.org/view/Nightly/job/nightly-libtensorflow/TYPE=cpu-slave/lastSuccessfulBuild/artifact/lib_package/libtensorflow-cpu-linux-x86_64.tar.gz
 */
// Pick the tarball matching the host platform (undefined on other platforms).
const downloadUrl = {
  darwin: 'http://propelml.org/libtensorflow_20171121/libtensorflow-cpu-darwin-x86_64.tar.gz',
  linux: 'http://propelml.org/libtensorflow_20171121/libtensorflow-cpu-linux-x86_64.tar.gz'
}[process.platform];

// Destination directory; node-gyp passes PRODUCT_DIR as argv[2].
const outDir = process.argv[2] || '';

const fs = require('fs');
const http = require('http');
const path = require('path');
const tar = require('tar');

fetch(downloadUrl);

// Download `url` and stream-extract every .so in the tarball into outDir.
function fetch(url) {
  console.error('Downloading %s', url);

  http.get(url, (res) => {
    if (res.statusCode === 502)
      return fetch(url); // Tensorflow CI server can be very flaky at times.
    else if (res.statusCode !== 200)
      throw new Error("Download failed: HTTP " + res.statusCode + "\n" + url);

    res.pipe(new tar.Parse({
      onentry(entry) {
        let name = path.basename(entry.header.path);
        let ext = path.extname(name);

        if (ext === '.so') {
          console.error('Extracting %s', name);
          let outPath = path.resolve(outDir, name);
          entry.pipe(fs.createWriteStream(outPath));
        }

        // Always drain the entry so the tar parser keeps flowing.
        entry.resume();
      },

      onwarn: console.warn
    }));
  });
}

‎libtensorflow/generate-def.js

+16
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
2+
/* Generate a Windows module-definition (.def) file for tensorflow.dll by
 * scanning the given C header files for TF_CAPI_EXPORT declarations and
 * writing the exported symbol names to stdout. */
const fs = require('fs');

// Header files to scan are passed as command-line arguments.
const files = process.argv.slice(2);

const symbols = files
    .map((file) => fs.readFileSync(file))
    .join('\n')
    .split('\n')
    .map((line) => {
      // Capture the function name from lines shaped like
      // "TF_CAPI_EXPORT extern <type> TF_Foo(".
      // NOTE(review): a declaration whose name wraps onto the next line is
      // missed by this line-at-a-time regex — confirm the headers don't do so.
      var match = /^TF_CAPI_EXPORT.*?\s+(\w+)\s*\(/.exec(line);
      return match && match[1];  // null for non-matching lines
    })
    .filter((symbol) => symbol !== null);

process.stdout.write('EXPORTS\n' + symbols.join('\n'));

‎libtensorflow/include/c_api.h

+1,511
Large diffs are not rendered by default.

‎libtensorflow/include/eager_c_api.h

+222
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,222 @@
1+
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License.
14+
==============================================================================*/
15+
16+
#ifndef TENSORFLOW_C_EAGER_C_API_H_
17+
#define TENSORFLOW_C_EAGER_C_API_H_
18+
19+
// C API extensions to experiment with eager execution of kernels.
20+
21+
#include "c_api.h"
22+
23+
// Macro to control visibility of exported symbols in the shared library (.so,
24+
// .dylib, .dll).
25+
// This duplicates the TF_EXPORT macro definition in
26+
// tensorflow/core/platform/macros.h in order to keep this .h file independent
27+
// of any other includes.
28+
#ifdef SWIG
29+
#define TF_CAPI_EXPORT
30+
#else
31+
#if defined(COMPILER_MSVC)
32+
#ifdef TF_COMPILE_LIBRARY
33+
#define TF_CAPI_EXPORT __declspec(dllexport)
34+
#else
35+
#define TF_CAPI_EXPORT __declspec(dllimport)
36+
#endif // TF_COMPILE_LIBRARY
37+
#else
38+
#define TF_CAPI_EXPORT __attribute__((visibility("default")))
39+
#endif // COMPILER_MSVC
40+
#endif // SWIG
41+
42+
#ifdef __cplusplus
43+
extern "C" {
44+
#endif
45+
46+
typedef struct TFE_ContextOptions TFE_ContextOptions;
47+
48+
// Return a new options object.
49+
TF_CAPI_EXPORT extern TFE_ContextOptions* TFE_NewContextOptions();
50+
51+
// Set the config in TF_ContextOptions.options.
52+
// config should be a serialized tensorflow.ConfigProto proto.
53+
// If config was not parsed successfully as a ConfigProto, record the
54+
// error information in *status.
55+
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetConfig(
56+
TFE_ContextOptions* options, const void* proto, size_t proto_len,
57+
TF_Status* status);
58+
59+
// Controls how to act when we try to run an operation on a given device but
60+
// some input tensors are not on that device.
61+
typedef enum TFE_ContextDevicePlacementPolicy {
62+
// The default: running operations with input tensors on the wrong device will
63+
// fail.
64+
TFE_DEVICE_PLACEMENT_EXPLICIT = 0,
65+
// Copy the tensor to the right device but log a warning.
66+
TFE_DEVICE_PLACEMENT_WARN = 1,
67+
// Silently copy the tensor, which has a performance cost since the
68+
// operation will be blocked till the copy completes.
69+
TFE_DEVICE_PLACEMENT_SILENT = 2,
70+
} TFE_ContextDevicePlacementPolicy;
71+
72+
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetDevicePlacementPolicy(
73+
TFE_ContextOptions*, TFE_ContextDevicePlacementPolicy);
74+
75+
// Destroy an options object.
76+
TF_CAPI_EXPORT extern void TFE_DeleteContextOptions(TFE_ContextOptions*);
77+
78+
// "Context" under which operations/functions are executed. It encapsulates
79+
// things like the available devices, resource manager etc.
80+
//
81+
// TODO(ashankar): Merge with TF_Session?
82+
typedef struct TFE_Context TFE_Context;
83+
84+
TF_CAPI_EXPORT extern TFE_Context* TFE_NewContext(
85+
const TFE_ContextOptions* opts, TF_Status* status);
86+
TF_CAPI_EXPORT extern void TFE_DeleteContext(TFE_Context* ctx, TF_Status* status);
87+
TF_CAPI_EXPORT extern TF_DeviceList* TFE_ContextListDevices(TFE_Context* ctx,
88+
TF_Status* status);
89+
90+
// A handle to a tensor on a device.
91+
//
92+
// Like a TF_Tensor, a TFE_TensorHandle refers to a tensor with a value, shape,
93+
// type etc. Unlike a TF_Tensor, a TFE_TensorHandle may refer to such tensors
94+
// placed in memory of different devices or remote address spaces.
95+
typedef struct TFE_TensorHandle TFE_TensorHandle;
96+
97+
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandle(TF_Tensor* t,
98+
TF_Status* status);
99+
TF_CAPI_EXPORT extern void TFE_DeleteTensorHandle(TFE_TensorHandle* h);
100+
TF_CAPI_EXPORT extern TF_DataType TFE_TensorHandleDataType(TFE_TensorHandle* h);
101+
TF_CAPI_EXPORT extern int TFE_TensorHandleNumDims(TFE_TensorHandle* h);
102+
TF_CAPI_EXPORT extern int64_t TFE_TensorHandleDim(TFE_TensorHandle* h, int dim_index);
103+
TF_CAPI_EXPORT extern const char* TFE_TensorHandleDeviceName(TFE_TensorHandle* h);
104+
TF_CAPI_EXPORT extern TF_Tensor* TFE_TensorHandleResolve(TFE_TensorHandle* h,
105+
TF_Status* status);
106+
107+
// Create a new TFE_TensorHandle with the same contents as 'h' but placed
108+
// in the memory of the device name 'device_name'.
109+
// If source and destination are the same device, then this creates a new handle
110+
// that shares the underlying buffer. Otherwise, it currently requires at least
111+
// one of the source or destination devices to be CPU (i.e., for the source or
112+
// destination tensor to be placed in host memory).
113+
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_TensorHandleCopyToDevice(TFE_TensorHandle* h,
114+
TFE_Context* ctx,
115+
const char* device_name,
116+
TF_Status* status);
117+
118+
// Description of the TensorFlow op to execute.
119+
//
120+
// Assumes that the provided 'ctx' outlives the returned TFE_Op, i.e.,
121+
// TFE_DeleteOp() is called before TFE_DeleteContext().
122+
//
123+
// Very similar to TF_OperationDescription with some differences:
124+
// (1) TF_Output or TFE_TensorHandle* as arguments to TF_AddInput,
125+
// TF_AddInputList
126+
// (2) TF_ColocateWith, TF_AddControlInput etc. do not make sense.
127+
// (3) Implementation detail: Avoid use of NodeBuilder/NodeDefBuilder since
128+
// the additional sanity checks there seem unnecessary;
129+
typedef struct TFE_Op TFE_Op;
130+
131+
TF_CAPI_EXPORT extern TFE_Op* TFE_NewOp(TFE_Context* ctx, const char* op_or_function_name,
132+
TF_Status* status);
133+
TF_CAPI_EXPORT extern void TFE_DeleteOp(TFE_Op* op);
134+
135+
TF_CAPI_EXPORT extern void TFE_OpSetDevice(TFE_Op* op, const char* device_name,
136+
TF_Status* status);
137+
138+
TF_CAPI_EXPORT extern void TFE_OpAddInput(TFE_Op* op, TFE_TensorHandle* h, TF_Status* status);
139+
140+
TF_CAPI_EXPORT extern TF_AttrType TFE_OpGetAttrType(TFE_Op* op, const char* attr_name,
141+
unsigned char* is_list, TF_Status* status);
142+
// Get an attribute type given an op name; a fusion of TFE_NewOp and
143+
// TFE_OpGetAttrType for use from Python without the overhead of the individual
144+
// calls and memory management of TFE_Op.
145+
TF_CAPI_EXPORT extern TF_AttrType TFE_OpNameGetAttrType(
146+
TFE_Context* ctx, const char* op_or_function_name, const char* attr_name,
147+
unsigned char* is_list, TF_Status* status);
148+
149+
TF_CAPI_EXPORT extern void TFE_OpSetAttrString(TFE_Op* op, const char* attr_name,
150+
const char* value);
151+
TF_CAPI_EXPORT extern void TFE_OpSetAttrInt(TFE_Op* op, const char* attr_name, int64_t value);
152+
TF_CAPI_EXPORT extern void TFE_OpSetAttrFloat(TFE_Op* op, const char* attr_name, float value);
153+
TF_CAPI_EXPORT extern void TFE_OpSetAttrBool(TFE_Op* op, const char* attr_name,
154+
unsigned char value);
155+
TF_CAPI_EXPORT extern void TFE_OpSetAttrType(TFE_Op* op, const char* attr_name,
156+
TF_DataType value);
157+
// If the number of dimensions is unknown, `num_dims` must be set to
158+
// -1 and `dims` can be null. If a dimension is unknown, the
159+
// corresponding entry in the `dims` array must be -1.
160+
TF_CAPI_EXPORT extern void TFE_OpSetAttrShape(TFE_Op* op, const char* attr_name,
161+
const int64_t* dims, const int num_dims,
162+
TF_Status* out_status);
163+
164+
// Sets the attribute attr_name to be a function specified by 'function'.
165+
//
166+
// TODO(ashankar,iga): Add this functionality to the C API for graph
167+
// construction. Perhaps we want an AttrValueMap equivalent in the C API?
168+
TF_CAPI_EXPORT extern void TFE_OpSetAttrFunction(TFE_Op* op,
169+
const char* attr_name,
170+
const TFE_Op* value);
171+
172+
TF_CAPI_EXPORT extern void TFE_OpSetAttrStringList(TFE_Op* op, const char* attr_name,
173+
const char** value, int num_values);
174+
TF_CAPI_EXPORT extern void TFE_OpSetAttrIntList(TFE_Op* op, const char* attr_name,
175+
const int64_t* values, int num_values);
176+
TF_CAPI_EXPORT extern void TFE_OpSetAttrFloatList(TFE_Op* op, const char* attr_name,
177+
const float* values, int num_values);
178+
TF_CAPI_EXPORT extern void TFE_OpSetAttrBoolList(TFE_Op* op, const char* attr_name,
179+
const unsigned char* values, int num_values);
180+
TF_CAPI_EXPORT extern void TFE_OpSetAttrTypeList(TFE_Op* op, const char* attr_name,
181+
const TF_DataType* values, int num_values);
182+
TF_CAPI_EXPORT extern void TFE_OpSetAttrShapeList(TFE_Op* op, const char* attr_name,
183+
const int64_t** dims, const int* num_dims,
184+
int num_values, TF_Status* out_status);
185+
186+
// Execute the operation defined by 'op' and return handles to computed
187+
// tensors in 'retvals'.
188+
//
189+
// 'retvals' must point to a pre-allocated array of TFE_TensorHandle*
190+
// and '*num_retvals' should be set to the size of this array.
191+
//
192+
// On return, 'num_retvals' will be set to the actual number of outputs
193+
// returned by the operation.
194+
TF_CAPI_EXPORT extern void TFE_Execute(TFE_Op* op, TFE_TensorHandle** retvals,
195+
int* num_retvals, TF_Status* status);
196+
197+
// Add a function (serialized FunctionDef protocol buffer) to ctx so
198+
// that it can be invoked using TFE_Execute.
199+
TF_CAPI_EXPORT extern void TFE_ContextAddFunctionDef(TFE_Context* ctx,
200+
const char* serialized_function_def,
201+
size_t size, TF_Status* status);
202+
203+
#ifdef __cplusplus
204+
} /* end extern "C" */
205+
#endif
206+
207+
#ifdef __cplusplus
208+
// A workaround to ease conversion to and from numpy objects and
209+
// TFE_TensorHandle's.
210+
//
211+
// TODO(ashankar): Figure out an alternative scheme that precludes the need for
212+
// these API-boundary breaking methods.
213+
namespace tensorflow {
214+
class Tensor;
215+
} // namespace tensorflow
216+
217+
const tensorflow::Tensor* TFE_TensorHandleUnderlyingTensorInHostMemory(
218+
TFE_TensorHandle* h, TF_Status* status);
219+
TFE_TensorHandle* TFE_NewTensorHandle(const tensorflow::Tensor& t);
220+
#endif
221+
222+
#endif // TENSORFLOW_C_EAGER_C_API_H_

‎package.json

+5-2
Original file line numberDiff line numberDiff line change
@@ -11,12 +11,15 @@
1111
"codemirror": "^5.31.0",
1212
"d3": "^4.11.0",
1313
"seedrandom": "^2.4.3",
14+
"tar": "^4.0.2",
1415
"ts-loader": "^3.1.1",
1516
"typescript": "^2.6.1",
1617
"uglifyjs-webpack-plugin": "^1.0.1",
17-
"webpack": "^3.8.1"
18+
"webpack": "^3.8.1",
19+
"yauzl": "^2.9.1"
1820
},
1921
"scripts": {
2022
"webpack": "node ./node_modules/webpack/bin/webpack.js --verbose"
21-
}
23+
},
24+
"gypfile": true
2225
}

‎test-tensorflow-binding.js

+11
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
2+
/* Smoke test for the native tensorflow binding: build a 2x3 tensor from a
 * Uint16Array and print the device it was placed on. */
let tf;
try {
  // Prefer a Debug build when one exists; fall back to Release.
  tf = require('./build/Debug/tensorflow-binding.node');
} catch (e) {
  tf = require('./build/Release/tensorflow-binding.node');
}

let typedArray = new Uint16Array([1, 2, 3, 4, 5, 6]);
let tensor = new tf.Tensor(typedArray, [2, 3]);
console.log(tensor.device);

0 commit comments

Comments
 (0)
This repository has been archived.