Skip to content

Commit 37738da

Browse files
committed
Numerous changes to file-modes, small build-tweaks, and a tweak to aiori-S3.c
(Only rank-0 should create the bucket, if it doesn't already exist.) Prepping this for a push (as an experimental S3 build) to github.
1 parent a50da47 commit 37738da

38 files changed

+258
-136
lines changed

COPYRIGHT

100644100755
File mode changed.

ChangeLog

100644100755
File mode changed.

META

100644100755
File mode changed.

Makefile.am

100644100755
File mode changed.

README

100644100755
+2
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
[See also NOTES.txt]
2+
13
Building
24
--------
35

README_S3

+82
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,82 @@
1+
[This is some help for building with experimental S3 support.]
2+
3+
4+
--- BUILDING
5+
6+
module load openmpi-gnu
7+
8+
./bootstrap
9+
10+
11+
# configure must be able to find libaws4c, libxml2, libcurl, libcrypto, etc.
12+
# On some machines, the libxml2 include files are in:
13+
#
14+
# /usr/include/libxml2
15+
#
16+
# which is not a standard include location, so we must define CPPFLAGS
17+
# to put that path in the include path. Do this:
18+
#
19+
# setenv CPPFLAGS -I/usr/include/libxml2
20+
# [or add "CPPFLAGS=-I/usr/include/libxml2" to the end of the configure command]
21+
#
22+
# Use the --with-aws4c=DIR to point to the path where your libaws4c library
23+
# sits.
24+
#
25+
# Use the --prefix=iorInstallDir to point to the path where you want IOR
26+
# to install all the files it builds during the "make" process.
27+
28+
./configure --prefix=iorInstallDir --with-S3 [ --with-aws4c=DIR ]
29+
30+
31+
# Here is an example configure-command invocation, for building with the
32+
# S3-targeting extensions, which require access to additional libraries
33+
# mentioned above. This will probably not work verbatim on your
34+
# installation, because libraries will be installed in different locations,
35+
# but it could help you to understand what is needed.
36+
37+
./configure --prefix=`pwd`/installed \
38+
--with-S3 \
39+
--with-aws4c=`pwd`/aws4c \
40+
LDFLAGS="-L/usr/lib64 -L`pwd`/libxml2-2.9.1/installed/lib" \
41+
CFLAGS="-I`pwd`/libxml2-2.9.1/installed/include/libxml2"
42+
43+
44+
# 14-May-2015:
45+
# To change the target of the experiment, there is an #if block from line
46+
# 284-309. The "if" portion is activated by putting a 1 as the argument
47+
# instead of a 0. In that case, the experiment will use the four ECS nodes
48+
# directly, splitting the load up between all four.
49+
#
50+
# If the #if argument is 0, then the "#else" portion is executed. In this
51+
# case you can use the load balancer, haproxy.ccstar.lanl.gov, by using
52+
# the IP 10.143.0.1:80. If you want to use one of the ECS nodes directly
53+
# use the IP 10.140.0.[15-17]:9020.
54+
#
55+
# To specify the bucket where the experiment file(s) will go, you need
56+
# to set that with the "const char* bucket_name" declaration. There are
57+
# a couple options at lines 207-208.
58+
59+
60+
make
61+
make install
62+
63+
64+
-- RUNNING (various options ...)
65+
66+
# llogin -np 4
67+
msub -IX -l nodes=4:ppn=4,walltime=11:00:00
68+
69+
70+
71+
# For debugging, run on 1 node, -vvvv turns on detailed curl debugging
72+
mpirun -np 1 MY_INSTALL_DIR/bin/ior -a S3 -o test_`date +"%Y%m%d_%H%M%S"` -vvvv -t1k -b1k
73+
74+
# this uses the default number of processors
75+
mpirun MY_INSTALL_DIR/bin/ior -a S3 -C -o test_`date +"%Y%m%d_%H%M%S"`
76+
77+
# this does one parallel run, putting a heavy load on the server [assumes bash]
78+
mpirun -npernode 8 MY_INSTALL_DIR/bin/ior -a S3_EMC -C -o test_`date +"%Y%m%d_%H%M%S"` \
79+
-b $(( 128 * 1024 * 1024 )) \
80+
-t $(( 128 * 1024 * 1024 )) \
81+
-i 1
82+

TBD.txt

-90
This file was deleted.

aws4c-0.5.1.tgz

-31.6 KB
Binary file not shown.

config/ax_prog_cc_mpi.m4

100644100755
File mode changed.

config/x_ac_meta.m4

100644100755
File mode changed.

configure.ac

100644100755
+67-3
Original file line numberDiff line numberDiff line change
@@ -112,17 +112,81 @@ AM_COND_IF([USE_POSIX_AIORI],[
112112
AC_DEFINE([USE_POSIX_AIORI], [], [Build POSIX backend AIORI])
113113
])
114114

115-
# Amazon S3 support
115+
116+
117+
118+
# aws4c is needed for the S3 backend (see --with-S3, below).
119+
# Version 0.5.2 of aws4c is available at https://github.com/jti-lanl/aws4c.git
120+
# Install it something like this:
121+
#
122+
# cd $my_install_dir
123+
# git clone https://github.com/jti-lanl/aws4c.git
124+
# cd aws4c
125+
# make
126+
#
127+
# Then:
128+
# --with-S3 --with-aws4c=$my_install_dir/aws4c
129+
130+
aws4c_dir=
131+
AC_ARG_WITH([aws4c],
132+
[AS_HELP_STRING([--with-aws4c=DIR],
133+
[aws4c library is needed for Amazon S3 backend])],
134+
[aws4c_dir="$withval"])
135+
AM_CONDITIONAL([AWS4C_DIR], [test x$aws4c_dir != x])
136+
137+
# AC_SUBST([AWS4C_DIR],[$aws4c_dir])
138+
AM_COND_IF([AWS4C_DIR],[
139+
AC_SUBST([AWS4C_CPPFLAGS],[-I$aws4c_dir])
140+
AC_SUBST([AWS4C_LDFLAGS], [-L$aws4c_dir])
141+
])
142+
143+
144+
# Amazon S3 support [see also: --with-aws4c]
116145
AC_ARG_WITH([S3],
117146
[AS_HELP_STRING([--with-S3],
118-
[support IO with Amazon S3 backend @<:@default=yes@:>@])],
147+
[support IO with Amazon S3 backend @<:@default=no@:>@])],
119148
[],
120-
[with_S3=yes])
149+
[with_S3=no])
121150
AM_CONDITIONAL([USE_S3_AIORI], [test x$with_S3 = xyes])
122151
AM_COND_IF([USE_S3_AIORI],[
123152
AC_DEFINE([USE_S3_AIORI], [], [Build Amazon-S3 backend AIORI])
124153
])
125154

155+
err=0
156+
AS_IF([test "x$with_S3" != xno], [
157+
AC_MSG_NOTICE([beginning of S3-related checks])
158+
159+
# save user's values, while we use AC_CHECK_HEADERS with $AWS4C_DIR
160+
ORIG_CPPFLAGS=$CPPFLAGS
161+
ORIG_LDFLAGS=$LDFLAGS
162+
163+
CPPFLAGS="$CPPFLAGS $AWS4C_CPPFLAGS"
164+
LDFLAGS=" $LDFLAGS $AWS4C_LDFLAGS"
165+
166+
AC_CHECK_HEADERS([aws4c.h], [], [err=1])
167+
AC_CHECK_HEADERS([libxml/parser.h], [], [err=1])
168+
169+
# Autotools thinks searching for a library means I want it added to LIBS
170+
ORIG_LIBS=$LIBS
171+
AC_CHECK_LIB([curl], [curl_easy_init], [], [err=1])
172+
AC_CHECK_LIB([xml2], [xmlDocGetRootElement], [], [err=1])
173+
AC_CHECK_LIB([aws4c], [s3_get], [], [err=1], [-lcurl -lxml2 -lcrypto])
174+
LIBS=$ORIG_LIBS
175+
176+
AC_MSG_NOTICE([end of S3-related checks])
177+
if test "$err" == 1; then
178+
AC_MSG_FAILURE([S3 support is missing. dnl
179+
Make sure you have access to libaws4c, libcurl, libxml2, and libcrypto. dnl
180+
Consider --with-aws4c=, CPPFLAGS, LDFLAGS, etc])
181+
fi
182+
183+
# restore user's values
184+
CPPFLAGS=$ORIG_CPPFLAGS
185+
LDFLAGS=$ORIG_LDFLAGS
186+
])
187+
188+
189+
126190

127191

128192

contrib/Makefile.am

100644100755
File mode changed.

contrib/cbif.c

100644100755
File mode changed.

doc/Makefile.am

100644100755
File mode changed.

doc/USER_GUIDE

100644100755
File mode changed.

scripts/exampleScript

100644100755
File mode changed.

scripts/run_script.cnl

100644100755
File mode changed.

scripts/run_script.linux

100644100755
File mode changed.

src/Makefile.am

100644100755
+4-5
Original file line numberDiff line numberDiff line change
@@ -52,12 +52,11 @@ endif
5252

5353

5454
if USE_S3_AIORI
55-
# TBD: Find the aws4c and libxml installations programmatically
56-
AWS4C = /users/jti/projects/ecs_hobo/tools/aws4c-0.5
5755
ior_SOURCES += aiori-S3.c
58-
ior_CPPFLAGS += -I/usr/include/libxml2
59-
ior_CPPFLAGS += -I$(AWS4C)
60-
ior_LDFLAGS += -L$(AWS4C)
56+
if AWS4C_DIR
57+
ior_CPPFLAGS += $(AWS4C_CPPFLAGS)
58+
ior_LDFLAGS += $(AWS4C_LDFLAGS)
59+
endif
6160
ior_LDADD += -lcurl
6261
ior_LDADD += -lxml2
6362
ior_LDADD += -laws4c

src/aiori-HDF5.c

100644100755
File mode changed.

src/aiori-HDFS.c

100644100755
File mode changed.

src/aiori-MPIIO.c

100644100755
File mode changed.

src/aiori-NCMPI.c

100644100755
File mode changed.

src/aiori-PLFS.c

100644100755
File mode changed.

src/aiori-POSIX.c

100644100755
File mode changed.

src/aiori-S3.c

100644100755
+49-14
Original file line numberDiff line numberDiff line change
@@ -205,6 +205,7 @@ CURLcode rc;
205205

206206
/* Any objects we create or delete will be under this bucket */
207207
const char* bucket_name = "ior";
208+
//const char* bucket_name = "brettk";
208209

209210

210211
/***************************** F U N C T I O N S ******************************/
@@ -259,9 +260,9 @@ s3_connect( IOR_param_t* param ) {
259260
// NOTE: These inits could be done in init_IORParam_t(), in ior.c, but
260261
// would require conditional compilation, there.
261262

263+
aws_set_debug(param->verbose >= 4);
262264
aws_read_config(getenv("USER")); // requires ~/.awsAuth
263265
aws_reuse_connections(1);
264-
aws_set_debug(param->verbose >= 4);
265266

266267
// initialize IOBufs. These are basically dynamically-extensible
267268
// linked-lists. "growth size" controls the increment of new memory
@@ -272,24 +273,59 @@ s3_connect( IOR_param_t* param ) {
272273
param->etags = aws_iobuf_new();
273274
aws_iobuf_growth_size(param->etags, 1024*1024*8);
274275

276+
// WARNING: if you have http_proxy set in your environment, you may need
277+
// to override it here. TBD: add a command-line variable to
278+
// allow you to define a proxy.
279+
//
275280
// our hosts are currently 10.140.0.15 - 10.140.0.18
276281
// TBD: Try DNS-round-robin server at vi-lb.ccstar.lanl.gov
277-
snprintf(buff, BUFF_SIZE, "10.140.0.%d:9020", 15 + (rank % 4));
278-
s3_set_host(buff);
282+
// TBD: try HAProxy round-robin at 10.143.0.1
283+
284+
#if 1
285+
// snprintf(buff, BUFF_SIZE, "10.140.0.%d:9020", 15 + (rank % 4));
286+
// s3_set_proxy(buff);
287+
//
288+
// snprintf(buff, BUFF_SIZE, "10.140.0.%d", 15 + (rank % 4));
289+
// s3_set_host(buff);
290+
291+
snprintf(buff, BUFF_SIZE, "10.140.0.%d:9020", 15 + (rank % 4));
292+
s3_set_host(buff);
293+
294+
#else
295+
/*
296+
* If you just want to go to one of the ECS nodes, put that IP
297+
* address in here directly with port 9020.
298+
*
299+
*/
300+
// s3_set_host("10.140.0.15:9020");
301+
302+
/*
303+
* If you want to go to haproxy.ccstar.lanl.gov, this is its IP
304+
* address.
305+
*
306+
*/
307+
// s3_set_proxy("10.143.0.1:80");
308+
// s3_set_host( "10.143.0.1:80");
309+
#endif
279310

280311
// make sure test-bucket exists
281312
s3_set_bucket((char*)bucket_name);
282-
AWS4C_CHECK( s3_head(param->io_buf, "") );
283-
if ( param->io_buf->code == 404 ) { // "404 Not Found"
284-
printf(" bucket '%s' doesn't exist\n", bucket_name);
285313

286-
AWS4C_CHECK( s3_put(param->io_buf, "") ); /* creates URL as bucket + obj */
287-
AWS4C_CHECK_OK( param->io_buf ); // assure "200 OK"
288-
printf("created bucket '%s'\n", bucket_name);
289-
}
290-
else { // assure "200 OK"
291-
AWS4C_CHECK_OK( param->io_buf );
292-
}
314+
if (rank == 0) {
315+
AWS4C_CHECK( s3_head(param->io_buf, "") );
316+
if ( param->io_buf->code == 404 ) { // "404 Not Found"
317+
printf(" bucket '%s' doesn't exist\n", bucket_name);
318+
319+
AWS4C_CHECK( s3_put(param->io_buf, "") ); /* creates URL as bucket + obj */
320+
AWS4C_CHECK_OK( param->io_buf ); // assure "200 OK"
321+
printf("created bucket '%s'\n", bucket_name);
322+
}
323+
else { // assure "200 OK"
324+
AWS4C_CHECK_OK( param->io_buf );
325+
}
326+
}
327+
MPI_CHECK(MPI_Barrier(param->testComm), "barrier error");
328+
293329

294330
// Maybe allow EMC extensions to S3
295331
s3_enable_EMC_extensions(param->curl_flags & IOR_CURL_S3_EMC_EXT);
@@ -821,7 +857,6 @@ S3_Xfer_internal(int access,
821857
// we're "extending" rather than "appending". That means the
822858
// buffer represents empty storage, which will be filled by the
823859
// libcurl writefunction, invoked via aws4c.
824-
825860
aws_iobuf_reset(param->io_buf);
826861
aws_iobuf_extend_static(param->io_buf, data_ptr, remaining);
827862
AWS4C_CHECK( s3_get(param->io_buf, file) );

src/aiori.h

100644100755
File mode changed.

0 commit comments

Comments
 (0)