diff --git a/externals/nitro/externals/coda-oss/CMakeLists.txt b/externals/nitro/externals/coda-oss/CMakeLists.txt
new file mode 100644
index 000000000..eaa5e7f21
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/CMakeLists.txt
@@ -0,0 +1,60 @@
# - CODA-OSS CMake Build script.
#
# Author: Scott A. Colcord

cmake_minimum_required(VERSION 3.14)
project(coda-oss)

# Require C++20 project-wide.  CMAKE_CXX_STANDARD_REQUIRED makes the standard
# a hard requirement instead of a silent decay to an older standard.
# (Fixed: this previously called project() twice and set CXX_STANDARD_REQUIRED,
# a variable CMake does not read; the CMAKE_ prefix is required.)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED true)

if (EXISTS "${CMAKE_BINARY_DIR}/conanbuildinfo.cmake")
    # build and package with conan
    include("${CMAKE_BINARY_DIR}/conanbuildinfo.cmake")
    conan_basic_setup()

    include("${CMAKE_BINARY_DIR}/conan_paths.cmake")
endif()

# Quote both sides of STREQUAL so an empty or variable-like value cannot be
# re-dereferenced by if().
if ("${CMAKE_PROJECT_NAME}" STREQUAL "coda-oss")
    # this is the top level project

    # Always turn on "warnings as errors" to avoid lots of (meaningless?) build output;
    # we'll dial-back warnings as necessary.
    if (MSVC)
        # strip any warning level CMake already injected so ours is authoritative
        string(REGEX REPLACE "/W[0-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
        string(REGEX REPLACE "/W[0-4]" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
        add_compile_options(/std:c++20)
    elseif (UNIX)
        # NOTE: the compiler cannot be selected here.  CMAKE_C(XX)_COMPILER must
        # be set before the first project() call (via the CC/CXX environment
        # variables or a toolchain file); the previous
        # set(CMAKE_C_COMPILER gcc-10) at this point had no reliable effect.
        add_compile_options(
            -Wno-deprecated
            -Wno-unused-value
            -Wno-unused-but-set-variable
        )
        add_compile_options(-std=c++20)
    endif()

    list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
    include(CodaBuild)

    coda_initialize_build()

    # install our cmake build modules for downstream use
    install(DIRECTORY "cmake/"
            DESTINATION "${CODA_STD_PROJECT_LIB_DIR}/cmake/"
            FILES_MATCHING PATTERN "*.cmake")
endif()

add_subdirectory("modules")

# generate package config, with the following paths exported
set(JARS_DIR ${CODA_STD_PROJECT_LIB_DIR} CACHE INTERNAL
    "path to installed jars, needed by downstream projects")
set(SWIG_INCLUDE_DIR "${CODA_STD_PROJECT_INCLUDE_DIR}/swig" CACHE INTERNAL
    "path to installed SWIG includes, needed by downstream projects")
coda_generate_package_config(JARS_DIR SWIG_INCLUDE_DIR)
diff --git a/externals/nitro/externals/coda-oss/UnitTest/UnitTest.vcxproj b/externals/nitro/externals/coda-oss/UnitTest/UnitTest.vcxproj
new file mode 100644
index 000000000..d0db7a704
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/UnitTest/UnitTest.vcxproj
@@ -0,0 +1,370 @@
+
+
+
+
+ Debug
+ x64
+
+
+ Release
+ x64
+
+
+
+ 17.0
+ {34A31B3F-47C5-441D-AB22-3C85B3C5314E}
+ Win32Proj
+ UnitTest
+ 10.0
+ NativeUnitTestProject
+
+
+
+ DynamicLibrary
+ true
+ v143
+ false
+ Unicode
+
+
+ DynamicLibrary
+ false
+ v143
+ true
+ false
+ Unicode
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ true
+
+
+ false
+
+
+
+ Use
+ Level4
+ true
+ $(VCInstallDir)UnitTest\include;$(ProjectDir);$(SolutionDir)modules\c++\;$(SolutionDir)modules\c++\avx\include\;$(SolutionDir)modules\c++\cli\include\;$(SolutionDir)modules\c++\config\include\;$(SolutionDir)modules\c++\coda_oss\include\;$(SolutionDir)modules\c++\gsl\include\;$(SolutionDir)modules\c++\io\include\;$(SolutionDir)modules\c++\std\include\;$(SolutionDir)modules\c++\str\include\;$(SolutionDir)modules\c++\sys\include\;$(SolutionDir)modules\c++\except\include\;$(SolutionDir)modules\c++\logging\include\;$(SolutionDir)modules\c++\math\include\;$(SolutionDir)modules\c++\math.linear\include\;$(SolutionDir)modules\c++\math.poly\include\;$(SolutionDir)modules\c++\mem\include\;$(SolutionDir)modules\c++\mt\include\;$(SolutionDir)modules\c++\polygon\include\;$(SolutionDir)modules\c++\re\include\;$(SolutionDir)modules\c++\types\include\;$(SolutionDir)modules\c++\units\include\;%(AdditionalIncludeDirectories)
+ _DEBUG;%(PreprocessorDefinitions);MT_DEFAULT_PINNING=0;RE_ENABLE_STD_REGEX=1
+ true
+ pch.h
+ true
+ true
+ Guard
+ ProgramDatabase
+ true
+ stdcpp20
+
+
+ Windows
+ $(VCInstallDir)UnitTest\lib;%(AdditionalLibraryDirectories)
+
+
+
+
+ Use
+ Level3
+ true
+ true
+ true
+ $(VCInstallDir)UnitTest\include;$(ProjectDir);$(SolutionDir)modules\c++\;$(SolutionDir)modules\c++\avx\include\;$(SolutionDir)modules\c++\cli\include\;$(SolutionDir)modules\c++\config\include\;$(SolutionDir)modules\c++\coda_oss\include\;$(SolutionDir)modules\c++\gsl\include\;$(SolutionDir)modules\c++\io\include\;$(SolutionDir)modules\c++\std\include\;$(SolutionDir)modules\c++\str\include\;$(SolutionDir)modules\c++\sys\include\;$(SolutionDir)modules\c++\except\include\;$(SolutionDir)modules\c++\logging\include\;$(SolutionDir)modules\c++\math\include\;$(SolutionDir)modules\c++\math.linear\include\;$(SolutionDir)modules\c++\math.poly\include\;$(SolutionDir)modules\c++\mem\include\;$(SolutionDir)modules\c++\mt\include\;$(SolutionDir)modules\c++\polygon\include\;$(SolutionDir)modules\c++\re\include\;$(SolutionDir)modules\c++\types\include\;$(SolutionDir)modules\c++\units\include\;%(AdditionalIncludeDirectories)
+ NDEBUG;%(PreprocessorDefinitions);MT_DEFAULT_PINNING=0;RE_ENABLE_STD_REGEX=1
+ true
+ pch.h
+ true
+ Guard
+ true
+ stdcpp20
+ true
+
+
+ Windows
+ true
+ true
+ $(VCInstallDir)UnitTest\lib;%(AdditionalLibraryDirectories)
+
+
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+
+ Create
+ Create
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {9997e895-5161-4ddf-8f3f-099894cb2f21}
+
+
+
+
+
+
\ No newline at end of file
diff --git a/externals/nitro/externals/coda-oss/build/build.py b/externals/nitro/externals/coda-oss/build/build.py
new file mode 100644
index 000000000..3a3f93834
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/build/build.py
@@ -0,0 +1,1865 @@
+import sys, os, types, re, fnmatch, subprocess, shutil, platform, inspect
+from os.path import split, isdir, isfile, exists, splitext, abspath, join, \
+ basename, dirname
+
+from waflib import Options, Utils, Logs, TaskGen, Context
+from waflib.Options import OptionsContext
+from waflib.Configure import conf, ConfigurationContext
+from waflib.Build import BuildContext, ListContext, CleanContext, InstallContext
+from waflib.TaskGen import task_gen, feature, after, before
+from waflib.Task import Task
+from waflib.Utils import to_list as listify
+from waflib.Utils import h_file
+from waflib.Tools import waf_unit_test
+from waflib import Context, Errors
+from msvs import msvs_generator
+from eclipse import eclipse
+from dumpenv import dumpenv
+from dumplib import dumplib
+from dumplibraw import dumplibraw
+from dumpconfig import dumpconfig
+from makewheel import makewheel
+from package import package
+
# Sanity check: waf hashes build state with MD5; on FIPS-restricted Pythons
# hashlib.md5() raises ValueError, so surface a friendlier hint up front.
try:
    import hashlib
    hashlib.md5()
except ValueError:
    Logs.error('MD5 error - you are likely trying to use an old python on a new machine to run waf. '
               'If you run into a fatal FIPS error try finding a newer version of python.')

# VCS/administrative files and directories that should never be scanned,
# packaged, or treated as build inputs
COMMON_EXCLUDES = '.bzr .bzrignore .git .gitignore .svn CVS .cvsignore .arch-ids {arch} SCCS BitKeeper .hg _MTN _darcs Makefile Makefile.in config.log'.split()
COMMON_EXCLUDES_EXT ='~ .rej .orig .pyc .pyo .bak .tar.bz2 tar.gz .zip .swp'.split()

# ignore files ending in these extensions
# (registering Utils.nada as the extension handler makes waf skip them)
for ext in COMMON_EXCLUDES_EXT:
    TaskGen.extension(ext)(Utils.nada)

if sys.version_info < (3,6,0):
    raise Errors.WafError('Build system requires at least Python 3.6')

# provide a partial function if we don't have one
# (fallback for ancient Pythons without functools.partial)
try:
    from functools import partial
except:
    def partial(fn, *cargs, **ckwargs):
        # bind positional/keyword args now; merge call-time kwargs on top
        def call_fn(*fargs, **fkwargs):
            d = ckwargs.copy()
            d.update(fkwargs)
            return fn(*(cargs + fargs), **d)
        return call_fn
+
+class CPPContext(Context.Context):
+ """
+ Create a custom context for building C/C++ modules/plugins
+ """
+ cmd='evil'
+ module_hooks = []
+
    def recurse(self,string=None):
        """
        Recurse into child wscripts.

        'string' may be a whitespace-separated string of directory names, a
        list of names, or None/empty (meaning "every immediate subdirectory").
        Only directories that actually contain a 'wscript' are recursed into.
        """
        dirs = []
        if isinstance(string, str):
            if len(string) == 0:
                # explicit empty string: recurse into nothing
                return
            else:
                dirs = string.split()
        else:
            dirs = string

        if not dirs:
            # no explicit list given: take every immediate subdirectory
            dirs = next(os.walk(self.path.abspath()))[1]

        # Move 'drivers' to the front of the list. We want to ensure that
        # drivers are configured before modules so dependencies are resolved
        # correctly
        if 'drivers' in dirs:
            dirs.remove('drivers')
            dirs.insert(0, 'drivers')

        dirsWithWscripts = [x for x in dirs if exists(
            join(self.path.abspath(), x, 'wscript'))]

        super(CPPContext, self).recurse(dirsWithWscripts)
+
+ def safeVersion(self, version):
+ return re.sub(r'[^\w]', '.', str(version))
+
    def __getDefines(self, env):
        """
        Convert env.DEFINES entries ('KEY' or 'KEY=VALUE' strings) into the
        define list handed to task generators.  HAVE_*/USE_* feature flags are
        emitted as bare names; everything else as KEY=VALUE.
        """
        defines = []
        for line in env.DEFINES:
            split = line.split('=')
            key = split[0]
            # NOTE: and/or short-circuit means a present-but-empty value
            # (e.g. 'KEY=') also falls back to '1'
            value = len(split) == 2 and split[1] or '1'
            if value is not None and value != ():
                if key.startswith('HAVE_') or key.startswith('USE_'):
                    defines.append(key)
                else:
                    defines.append('%s=%s' % (key, value))
        return defines
+
+ def _getEnv(self, modArgs):
+ if 'env' in modArgs:
+ env = modArgs['env']
+ else:
+ variant = modArgs.get('variant', self.env['VARIANT'] or 'default')
+ env = self.all_envs[variant]
+ return env
+
    def _extendGlobPatterns(self, globPatterns, modArgs):
        """
        Append '<dir>/*<ext>' glob patterns for every source directory and
        source extension implied by modArgs, and return the extended list.
        """
        lang = modArgs.get('lang', 'c++')
        # NOTE(review): the fallback for an unknown lang is 'cxx' with no
        # leading dot (the known langs use '.cpp'/'.c') -- confirm intended
        sourceExtensions = {'c++':'.cpp', 'c':'.c'}.get(lang, 'cxx')
        allSourceExtensions = (listify(modArgs.get('source_ext', '')) +
            [sourceExtensions])

        # Entire source directories can also be provided via
        # 'source_dir' or 'sourcedir'
        sourceDirs = listify(modArgs.get('source_dir',
            modArgs.get('sourcedir', 'source')))
        for dir in sourceDirs:
            for ext in allSourceExtensions:
                globPatterns.append(join(dir, '*%s' % ext))

        return globPatterns
+
+ def _configureUselibs(self, targetsToAdd, modArgs):
+ # This specifies that we need to check if it is a USELIB or USELIB_LOCAL
+ # If MAKE_%% is defined, then it is local; otherwise, it's a uselib
+ # If we're doing a source installation and we built it locally, the
+ # source target already got added on as a dependency. If we didn't
+ # build it locally, we need to add the source target on here since
+ # in that case this module doesn't depend on a task associated with
+ # the external library.
+ env = self._getEnv(modArgs)
+ lang = modArgs.get('lang', 'c++')
+ module_deps = list(['%s-%s' % (x, lang) for x in listify(
+ modArgs.get('module_deps', ''))])
+ uselib_local = module_deps + (listify(modArgs.get('uselib_local', ''))
+ + listify(modArgs.get('use','')))
+ uselib = listify(modArgs.get('uselib', '')) + ['CSTD', 'CRUN']
+
+ uselibCheck = modArgs.get('uselib_check', None)
+ if uselibCheck:
+ for currentLib in listify(uselibCheck):
+ if ('MAKE_%s' % currentLib) in env:
+ uselib_local += [currentLib]
+ else:
+ uselib += [currentLib]
+ if env['install_source']:
+ sourceTarget = '%s_SOURCE_INSTALL' % currentLib
+ targetsToAdd.append(sourceTarget)
+
+ # this specifies that we need to check if it is a USELIB or USELIB_LOCAL
+ # if MAKE_%% is defined, then it is local; otherwise, it's a uselib
+ uselibCheck = modArgs.pop('uselib_check', None)
+ if uselibCheck:
+ if ('MAKE_%s' % uselibCheck) in env:
+ uselib_local.append(uselibCheck)
+ else:
+ uselib.append(uselibCheck)
+
+ return uselib_local, uselib
+
+
+ def pprint(self, *strs, **kw):
+ colors = listify(kw.get('colors', 'blue'))
+ colors = list(map(str.upper, colors))
+ for i, s in enumerate(strs):
+ sys.stderr.write("%s%s " % (Logs.colors(colors[i % len(colors)]), s))
+ sys.stderr.write("%s%s" % (Logs.colors.NORMAL, os.linesep))
+
    def install_tgt(self, **modArgs):
        """
        Create an install-only task generator.

        The main purpose this serves is to recursively copy all the wscript's
        involved when we have a wscript whose sole job is to install files.
        When 'install_source' is enabled, the wscript targets themselves are
        appended so source installations pick them up too.
        """
        modArgs = dict((k.lower(), v) for k, v in list(modArgs.items()))
        env = self._getEnv(modArgs)

        features = 'install_tgt'
        if env['install_source']:
            targetsToAdd = modArgs.get('targets_to_add', [])
            # getWscriptTargets is defined elsewhere in this build script
            targetsToAdd = targetsToAdd + getWscriptTargets(self, env, self.path)
            modArgs['targets_to_add'] = targetsToAdd
            features += ' add_targets'
        return self(features = features, **modArgs)
+
    def module(self, **modArgs):
        """
        Builds a module, along with optional tests.
        It makes assumptions, but most can be overridden by passing in args.

        Creates the library task generator plus, as applicable: an install
        symlink, header/config-header install targets, per-file test and
        unittest programs, python-test install targets, and a conf/ install.
        Returns the environment used for the build.
        """
        bld = self
        env = self._getEnv(modArgs)

        # normalize kwarg names to lowercase
        modArgs = dict((k.lower(), v) for k, v in list(modArgs.items()))

        # let registered hooks adjust the arguments/environment first
        for func in self.module_hooks:
            func(modArgs, env)

        lang = modArgs.get('lang', 'c++')
        libExeType = {'c++':'cxx', 'c':'c'}.get(lang, 'cxx')
        if modArgs.get('nosuffix', False) :
            libName = modArgs['name']
        else :
            libName = '%s-%s' % (modArgs['name'], lang)
        path = modArgs.get('path',
            'dir' in modArgs and bld.path.find_dir(modArgs['dir']) or bld.path)

        defines = self.__getDefines(env) + listify(modArgs.get('defines', ''))
        includes = listify(modArgs.get('includes', 'include'))
        exportIncludes = listify(modArgs.get('export_includes', 'include'))
        libVersion = modArgs.get('version', None)
        installPath = modArgs.get('install_path', None)

        targetsToAdd = listify(modArgs.get('targets_to_add', ''))
        uselib_local, uselib = self._configureUselibs(targetsToAdd, modArgs)

        # versioned target names are only used off-Windows
        if libVersion is not None and sys.platform != 'win32':
            targetName = '%s.%s' % (libName, self.safeVersion(libVersion))
        else:
            targetName = libName

        glob_patterns = listify(modArgs.get('source', '')) or []
        glob_patterns = self._extendGlobPatterns(glob_patterns, modArgs)

        # Build the lib
        lib = bld(features='%s %s%s add_targets includes'% (libExeType, libExeType, env['LIB_TYPE'] or 'stlib'), includes=includes,
                target=targetName, name=libName, export_includes=exportIncludes,
                use=uselib_local, uselib=uselib, env=env.derive(),
                defines=defines, path=path,
                source=path.ant_glob(glob_patterns), targets_to_add=targetsToAdd)
        # drop any sources named in 'source_filter'
        lib.source = list(filter(partial(lambda x, t: basename(str(t)) not in x, modArgs.get('source_filter', '').split()), lib.source))

        if env['install_libs']:
            lib.install_path = installPath or env['install_libdir']

        # header-only module: nothing to compile, keep only the meta features
        if not lib.source:
            lib.features = 'add_targets includes'

        pattern = env['%s%s_PATTERN' % (libExeType, env['LIB_TYPE'] or 'stlib')]
        if libVersion is not None and sys.platform != 'win32' and Options.options.symlinks and env['install_libs'] and lib.source:
            # install an unversioned symlink next to the versioned library
            symlinkLoc = '%s/%s' % (lib.install_path, pattern % libName)
            lib.targets_to_add.append(bld(features='symlink_as_tgt', dest=symlinkLoc, src=pattern % lib.target, name='%s-symlink' % libName))

        if env['install_headers']:
            lib.targets_to_add.append(bld(features='install_tgt', pattern='**/*',
                                          dir=path.make_node('include'),
                                          install_path=env['install_includedir']))

        # copy config headers from target dir to install dir
        moduleName = modArgs['name']
        installPath = moduleName.replace('.', os.sep)

        # map module name -> build dir holding its generated config header
        d = {}
        for line in env['header_builddir']:
            split = line.split('=')
            k = split[0]
            v = join(self.bldnode.abspath(), split[1])
            d[k] = v

        if moduleName in d:
            configFilename = getConfigFilename(moduleName)
            targetPath = bld.root.find_dir(d[moduleName]).path_from(path)
            moduleNode = bld.path.make_node(targetPath)
            lib.targets_to_add.append(bld(features='install_tgt', files=[configFilename],
                                          dir=moduleNode,
                                          install_path=join(env['install_includedir'], installPath)))

        addSourceTargets(bld, env, path, lib)

        # build one test program per source file under tests/
        testNode = path.make_node('tests')
        if os.path.exists(testNode.abspath()) and not Options.options.libs_only:
            test_deps = listify(modArgs.get('test_deps', modArgs.get('module_deps', '')))

            test_deps.append(modArgs['name'])

            test_deps = list(['%s-%s' % (x, lang) for x in test_deps + listify(modArgs.get('test_uselib_local', '')) + listify(modArgs.get('test_use',''))])

            sourceExtension = {'c++':'.cpp', 'c':'.c'}.get(lang, 'cxx')
            for test in testNode.ant_glob('*%s' % sourceExtension):
                if str(test) not in listify(modArgs.get('test_filter', '')):
                    testName = splitext(str(test))[0]
                    self.program(env=env.derive(), name=testName, target=testName, source=str(test),
                                 use=test_deps,
                                 uselib=modArgs.get('test_uselib', uselib),
                                 lang=lang, path=testNode, includes=includes, defines=defines,
                                 install_path='${PREFIX}/tests/%s' % modArgs['name'])


        # Create install target for python tests
        if not Options.options.libs_only:
            for testDirname in ['tests', 'unittests']:
                pythonTestNode = path.parent.parent.make_node('python').\
                    make_node(str(path)).make_node(testDirname)
                if os.path.exists(pythonTestNode.abspath()):
                    tests = [str(test) for test in pythonTestNode.ant_glob('*.py') if
                             str(test) not in listify(modArgs.get('test_filter', ''))]
                    for test in tests:
                        installPath = '${PREFIX}/%s/%s' % (testDirname, modArgs['name'])
                        bld(features='install_tgt',
                            files=[test], dir=pythonTestNode,
                            name=test, target=test,
                            install_path=installPath)


        # build one unit-test program per source file under unittests/
        testNode = path.make_node('unittests')
        if os.path.exists(testNode.abspath()) and not Options.options.libs_only:
            test_deps = listify(modArgs.get('unittest_deps', modArgs.get('module_deps', '')))
            test_uselib = listify(modArgs.get('unittest_uselib', uselib))

            test_deps.append(modArgs['name'])

            if 'INCLUDES_UNITTEST' in env:
                # NOTE(review): this appends to the same 'includes' list used
                # by the library above -- confirm the mutation is intended
                for incl_dir in env['INCLUDES_UNITTEST']:
                    includes.append(incl_dir)

            test_deps = list(['%s-%s' % (x, lang) for x in test_deps + listify(modArgs.get('test_uselib_local', '')) + listify(modArgs.get('test_use',''))])

            tests = []
            sourceExtensions = {'c++':'.cpp', 'c':'.c'}.get(lang, 'cxx')
            for test in testNode.ant_glob('*%s' % sourceExtensions):
                if str(test) not in listify(modArgs.get('unittest_filter', '')):
                    testName = splitext(str(test))[0]
                    exe = self(features='%s %sprogram test' % (libExeType, libExeType),
                               env=env.derive(), name=testName, target=testName, source=str(test), use=test_deps,
                               uselib = modArgs.get('unittest_uselib', modArgs.get('uselib', '')),
                               lang=lang, path=testNode, defines=defines,
                               includes=includes,
                               install_path='${PREFIX}/unittests/%s' % modArgs['name'])
                    tests.append(testName)

        # install any runtime configuration shipped under conf/
        confDir = path.make_node('conf')
        if exists(confDir.abspath()):
            lib.targets_to_add.append(
                bld(features='install_tgt', dir=confDir, pattern='**',
                    install_path='${PREFIX}/share/%s/conf' % modArgs['name'],
                    copy_to_source_dir=True))

        return env
+
+
    def plugin(self, **modArgs):
        """
        Builds a plugin (.so) and sets the install path based on the type of
        plugin (via the plugin kwarg).

        Also registers the plugin with an aggregate '<plugin>-plugins' target
        so all plugins of one kind can be built together.
        """
        bld = self
        env = self._getEnv(modArgs).derive()

        modArgs = dict((k.lower(), v) for k, v in list(modArgs.items()))
        lang = modArgs.get('lang', 'c++')
        libExeType = {'c++':'cxx', 'c':'c'}.get(lang, 'cxx')
        libName = modArgs.get('libname', '%s-%s' % (modArgs['name'], lang))
        targetName = modArgs.get('targetname', libName)
        plugin = modArgs.get('plugin', '')
        path = modArgs.get('path',
            'dir' in modArgs and bld.path.find_dir(modArgs['dir']) or bld.path)

        # PLUGIN_MODULE_EXPORTS marks the build as producing a plugin module
        defines = self.__getDefines(env) + listify(modArgs.get('defines', '')) + ['PLUGIN_MODULE_EXPORTS']
        includes = listify(modArgs.get('includes', 'include'))
        exportIncludes = listify(modArgs.get('export_includes', 'include'))
        source = listify(modArgs.get('source', '')) or None
        removePluginPrefix = modArgs.get('removepluginprefix', False)

        # This is so that on Unix we name the plugins without the 'lib' prefix
        if removePluginPrefix:
            if env['cshlib_PATTERN'].startswith('lib'):
                env['cshlib_PATTERN'] = env['cshlib_PATTERN'][3:]
            if env['cxxshlib_PATTERN'].startswith('lib'):
                env['cxxshlib_PATTERN'] = env['cxxshlib_PATTERN'][3:]


        targetsToAdd = listify(modArgs.get('targets_to_add', ''))
        uselib_local, uselib = self._configureUselibs(targetsToAdd, modArgs)

        lib = bld(features='%s %sshlib add_targets no_implib' % (libExeType, libExeType),
               target=libName, name=targetName, source=source,
               includes=includes, export_includes=exportIncludes,
               use=uselib_local, uselib=uselib, env=env,
               defines=defines, path=path, targets_to_add=targetsToAdd,
               install_path=join(env['install_sharedir'], plugin, 'plugins'))

        glob_patterns = self._extendGlobPatterns([], modArgs)

        # no explicit sources: glob them, then drop any in 'source_filter'
        if not source:
            lib.source = path.ant_glob(glob_patterns)
            lib.source = list(filter(partial(lambda x, t: basename(str(t)) not in x, modArgs.get('source_filter', '').split()), lib.source))
        if env['install_headers']:
            lib.targets_to_add.append(bld(features='install_tgt', pattern='**/*',
                                      dir=path.make_node('include'),
                                      install_path=env['install_includedir']))

        addSourceTargets(self, env, path, lib)

        # install any runtime configuration shipped under conf/
        confDir = path.make_node('conf')
        if exists(confDir.abspath()):
            lib.targets_to_add.append(
                bld(features='install_tgt', dir=confDir, pattern='**',
                    install_path='${PREFIX}/share/%s/conf' % plugin,
                    copy_to_source_dir=True))

        # register with the aggregate '<plugin>-plugins' target, creating it
        # on first use (get_tgen_by_name raises when it doesn't exist yet)
        pluginsTarget = '%s-plugins' % plugin
        try:
            bld.get_tgen_by_name(pluginsTarget).targets_to_add.append(libName)
        except:
            bld(target=pluginsTarget,
                features='add_targets', targets_to_add=[libName])
+
    def program_helper(self, **modArgs):
        """
        Builds a program (exe)

        Sources default to '<source_dir>/*.c*' when not given explicitly;
        module_deps/uselib_local/use become waf 'use' entries, and the
        resulting task generator is returned.
        """
        bld = self
        env = self._getEnv(modArgs)

        modArgs = dict((k.lower(), v) for k, v in list(modArgs.items()))
        lang = modArgs.get('lang', 'c++')
        libExeType = {'c++':'cxx', 'c':'c'}.get(lang, 'cxx')
        progName = modArgs['name']
        path = modArgs.get('path',
            'dir' in modArgs and bld.path.find_dir(modArgs['dir']) or bld.path)

        module_deps = list(['%s-%s' % (x, lang) for x in listify(modArgs.get('module_deps', ''))])
        defines = self.__getDefines(env) + listify(modArgs.get('defines', ''))
        uselib_local = module_deps + listify(modArgs.get('uselib_local', '')) + listify(modArgs.get('use',''))
        uselib = listify(modArgs.get('uselib', '')) + ['CSTD', 'CRUN']
        targetsToAdd = listify(modArgs.get('targets_to_add', ''))
        includes = listify(modArgs.get('includes', 'include'))
        source = listify(modArgs.get('source', '')) or None
        install_path = modArgs.get('install_path', env['install_bindir'])

        # no explicit sources: glob the source directory, minus the filter
        if not source:
            source = bld.path.make_node(modArgs.get('source_dir', modArgs.get('sourcedir', 'source'))).ant_glob('*.c*', excl=modArgs.get('source_filter', ''))

        exe = bld.program(features = 'add_targets',
                          source=source, name=progName,
                          includes=includes, defines=defines,
                          use=uselib_local, uselib=uselib,
                          env=env.derive(), target=progName, path=path,
                          install_path=install_path,
                          targets_to_add=targetsToAdd)

        addSourceTargets(bld, env, path, exe)

        return exe
+
    def swigModule(self, **modArgs):
        """
        Builds a SWIG C++ module
        TODO: Add support for C as well

        No-op unless Python (with Python.h) was detected at configure time.
        Builds the extension either by running SWIG on source/<name>.i, or,
        when SWIG is unavailable, from the previously generated and checked-in
        source/generated/<name>_wrap.cxx.
        """
        bld = self
        if 'env' in modArgs:
            env = modArgs['env']
        else:
            env = bld.env

        if 'PYTHON' in env and env['PYTHON'] and bld.is_defined('HAVE_PYTHON_H'):
            modArgs = dict((k.lower(), v) for k, v in list(modArgs.items()))

            name = modArgs['name']
            codename = name

            # package defaults to the application name unless overridden
            package_name = Context.APPNAME
            try:
                package_name = modArgs['package']
            except:
                pass

            # name for the task to generate our __init__.py file
            # (remember we need one in each package)
            init_tgen_name = 'python_init_file_' + package_name

            # optional per-module prefix/postfix decorate the code name
            prefix = env['prefix_' + name]
            if prefix:
                codename = prefix + name

            postfix = env['postfix_' + name]
            if postfix:
                codename = codename + postfix

            swigSource = os.path.join('source', name.replace('.', '_') + '.i')
            if env['install_headers']:
                self.install_files(os.path.join(env['install_includedir'], 'swig'), swigSource)
            target = '_' + codename.replace('.', '_')
            use = modArgs['use'] + ' ' + init_tgen_name
            installPath = os.path.join(env['install_pydir'], package_name)
            taskName = name + '-python'
            exportIncludes = listify(modArgs.get('export_includes', 'source'))

            # If we have Swig, when the Swig target runs, it'll generate both the
            # _wrap.cxx file and the .py file and then copy them both to the
            # installation directory. If you just clobber the install directory
            # and re-run waf install, it'll just copy the .so over though - not
            # the .py file. Same problem if you don't have Swig. This target
            # will actually compile the .py file to a .pyc, but the other thing is
            # it'll copy the file over to the installation directory for us.
            # We ensure this always runs via 'add_targets'
            copyFilesTarget = target + '_py'
            bld(features = 'py',
                target = copyFilesTarget,
                env = env.derive(),
                install_path = installPath,
                source = bld.path.make_node('source').ant_glob('**/*.py'))

            # this turns the folder at the destination path into a package

            # Our package might be 'coda,' and then the modules under that
            # package would be mem, coda_sys, etc.
            # The current function executes for each module.
            # However, __init__.py gets installed at the package level.
            # So we're checking for the existence of a task generator
            # for the __init__.py for this module's package.
            # If we omit the check and have duplicate tgens,
            # the init's will overwrite each other and we get
            # nasty race conditions.
            initTarget = init_tgen_name
            try:
                # This will throw if the task generator hasn't been created yet
                bld.get_tgen_by_name(init_tgen_name)
            except Errors.WafError:
                bld(features = 'python_package',
                    name = initTarget,
                    target='__init__.py',
                    install_path = installPath)

            targetsToAdd = [copyFilesTarget, initTarget]

            # Tried to do this in process_swig_linkage() but it's too late
            # TODO: See if there's a cleaner way to do this
            # Basically, for Visual Studio if the -python targets are on the
            # use line, waf for some reason will not add in all the C++
            # dependencies that are needed, even if you put that C++ dependency
            # also explicitly on the use line. On Windows, we really logically
            # just want those to go on the targets_to_add line anyway.
            if env['COMPILER_CXX'] == 'msvc':
                updatedUse = []
                targetsToAdd = [copyFilesTarget]

                for lib in use.split():
                    if lib.endswith('-python'):
                        targetsToAdd.append(lib)
                    else:
                        updatedUse.append(lib)

                use = updatedUse

            if 'SWIG' in env and env['SWIG']:
                # If Swig is available, let's use it to build the .cxx file
                # This gets generated into the source/generated folder and we'll
                # actually check it in so other developers can still use the Python
                # bindings even if they don't have Swig
                flags = '-python -c++'
                if sys.version_info[0] >= 3 and not env['PYTHON_AGNOSTIC']:
                    flags += ' -py3'
                bld(features = 'cxx cshlib pyext add_targets swig_linkage includes',
                    source = swigSource,
                    target = target,
                    use = use,
                    export_includes = exportIncludes,
                    env = env.derive(),
                    swig_flags = flags,
                    install_path = installPath,
                    name = taskName,
                    targets_to_add = targetsToAdd,
                    swig_install_fun = swigCopyGeneratedSources)
            else:
                # If Swig is not available, use the cxx file already sitting around
                # that Swig generated sometime in the past
                bld(features = 'cxx cshlib pyext add_targets swig_linkage includes',
                    source = os.path.join('source', 'generated', codename.replace('.', '_') + '_wrap.cxx'),
                    target = target,
                    use = use,
                    export_includes = exportIncludes,
                    env = env.derive(),
                    name = taskName,
                    targets_to_add = targetsToAdd,
                    install_path = installPath)
+
+ def getBuildDir(self, path=None):
+ """
+ Returns the build dir, relative to where you currently are (bld.path)
+ """
+ if path is None:
+ path = self.path
+ return path.find_or_declare('.').abspath()
+
+ def mexify(self, **modArgs):
+ """
+ Utility for compiling a mex file (with mexFunction) to a mex shared lib
+ """
+ bld = self
+ env = self._getEnv(modArgs)
+
+ if 'HAVE_MATLAB' in self.env:
+ modArgs = dict((k.lower(), v) for k, v in list(modArgs.items()))
+ lang = modArgs.get('lang', 'c++')
+ libExeType = {'c++':'cxx', 'c':'c'}.get(lang, 'cxx')
+ path = modArgs.get('path',
+ 'dir' in modArgs and bld.path.find_dir(modArgs['dir']) or bld.path)
+
+ #override the shlib pattern
+ env = env.derive()
+ shlib_pattern = '%sshlib_PATTERN' % libExeType
+ if env[shlib_pattern].startswith('lib'):
+ env[shlib_pattern] = env[shlib_pattern][3:]
+ env[shlib_pattern] = splitext(env[shlib_pattern])[0] + env['MEX_EXT']
+
+ module_deps = list(['%s-%s' % (x, lang) for x in listify(modArgs.get('module_deps', ''))])
+ defines = self.__getDefines(env) + listify(modArgs.get('defines', ''))
+ uselib_local = module_deps + listify(modArgs.get('uselib_local', '')) + listify(modArgs.get('use',''))
+ uselib = listify(modArgs.get('uselib', '')) + ['CSTD', 'CRUN', 'MEX']
+ includes = listify(modArgs.get('includes', 'include'))
+ installPath = modArgs.get('install_path', None)
+ source = modArgs.get('source', None)
+ name = modArgs.get('name', None)
+ targetName = modArgs.get('target', None)
+
+ if source:
+ source = str(source)
+ name = splitext(split(str(source))[1])[0]
+
+ mex = bld(features='%s %sshlib'%(libExeType, libExeType), target=targetName or name,
+ name=name, use=uselib_local,
+ uselib=uselib, env=env.derive(), defines=defines,
+ path=path, source=source, includes=includes,
+ install_path=installPath or '${PREFIX}/mex')
+ if not source:
+ mex.source = path.ant_glob(modArgs.get('source_dir', modArgs.get('sourcedir', 'source')) + '/*')
+ mex.source = list(filter(partial(lambda x, t: basename(str(t)) not in x, modArgs.get('source_filter', '').split()), lib.source))
+ pattern = env['%s_PATTERN' % (env['LIB_TYPE'] or 'staticlib')]
+
class GlobDirectoryWalker:
    """ recursively walk a directory, matching filenames """
    def __init__(self, directory, patterns=["*"]):
        # directories (or files) still to visit, consumed LIFO
        self.stack = [directory]
        self.patterns = patterns
        self.files = []   # entries of the directory currently being scanned
        self.index = 0    # cursor within self.files

    def __iter__(self):
        # next() is a generator function, so calling it returns a fresh
        # generator over the remaining matches
        return self.next()

    def next(self):
        # Generator yielding every path under the root whose basename matches
        # one of the patterns.  Subdirectories are pushed onto the stack for
        # later traversal; symlinked directories are not followed.
        while True:
            try:
                file = self.files[self.index]
                self.index = self.index + 1
            except IndexError:
                # pop next directory from stack
                if len(self.stack) == 0:
                    return
                self.directory = self.stack.pop()
                if isdir(self.directory):
                    self.files = os.listdir(self.directory)
                else:
                    # a plain file on the stack: treat it as a one-entry listing
                    self.files, self.directory = [self.directory], ''
                self.index = 0
            else:
                # got a filename
                fullname = join(self.directory, file)
                if isdir(fullname) and not os.path.islink(fullname):
                    self.stack.append(fullname)
                for p in self.patterns:
                    if fnmatch.fnmatch(file, p):
                        yield fullname
+
def recursiveGlob(directory, patterns=None):
    """
    Recursively walk 'directory', yielding every path whose basename matches
    any of the fnmatch-style 'patterns' (default: every file).
    """
    # None sentinel avoids the shared-mutable-default-argument trap
    return GlobDirectoryWalker(directory, ["*"] if patterns is None else patterns)
+
+
def getPlatform(pwd=None, default=None):
    """
    Return the platform name.

    On win32 (or when 'default' already says win32) this is simply
    default/sys.platform.  Elsewhere, search under 'pwd' (default: the
    current directory) for an autoconf 'config.guess' script and, best
    effort, use the first one that runs successfully.
    """
    platform = default or sys.platform

    if platform != 'win32':
        if not pwd:
            pwd = os.getcwd()

        locs = recursiveGlob(pwd, patterns=['config.guess'])
        for loc in locs:
            if not exists(loc):
                continue
            # config.guess may not be executable after checkout; best effort.
            # (Fixed: this previously shelled out to chmod and then called
            # .close() on the Popen object -- an AttributeError that was
            # silently swallowed by a bare 'except'.)
            try:
                os.chmod(loc, 0o755)
            except OSError:
                pass
            try:
                proc = subprocess.Popen(loc, shell=True, stdout=subprocess.PIPE,
                                        universal_newlines=True)
                guess = proc.stdout.readline().strip('\n')
                proc.stdout.close()
                proc.wait()
            except OSError:
                # a broken config.guess must not abort configuration
                continue
            platform = guess
            break
    return platform
+
def swigCopyGeneratedSources(tsk):
    """
    Post-SWIG install hook: copy the generated .c/.cxx/.py outputs into the
    module's source/generated directory (so they can be checked in and reused
    without SWIG) and, during an install run, into the install dir as well.
    """
    import shutil
    genDir = tsk.inputs[0].parent.make_node('generated')
    instDir = Utils.subst_vars(tsk.generator.install_path, tsk.env).replace('/', os.sep)
    Utils.check_dir(genDir.abspath())
    if tsk.generator.bld.is_install:
        Utils.check_dir(instDir)
    for file in tsk.outputs:
        name = basename(str(file))
        if file.suffix() in ['.c', '.cxx']:
            # normalize the wrapper's name to <base>_wrap.<ext>
            shutil.copy2(file.abspath(), genDir.make_node(name.split('.')[0] + '_wrap' + file.suffix()).abspath())
        elif file.suffix() == '.py':
            shutil.copy2(file.abspath(), genDir.make_node(name).abspath())
            if tsk.generator.bld.is_install:
                shutil.copy2(file.abspath(), join(instDir, name))
+
+import zipfile
def unzipper(inFile, outDir):
    """Extract the zip archive 'inFile' into the directory 'outDir'.

    'outDir' and any intermediate directories are created as needed; the
    directory creation is skipped when outDir looks like a bare Windows
    drive spec (trailing ':').  Unlike the original, the archive and each
    output file are closed even when an extraction step raises, and archives
    that omit explicit directory entries still extract correctly.
    """
    if not outDir.endswith(':') and not exists(outDir):
        os.makedirs(outDir)

    with zipfile.ZipFile(inFile) as zf:
        # create recorded directory entries first (sorted => parents first)
        for d in sorted(x for x in zf.namelist() if x.endswith('/')):
            full = join(outDir, d)
            if not exists(full):
                os.makedirs(full)

        for name in (x for x in zf.namelist() if not x.endswith('/')):
            dest = join(outDir, name)
            # some archives have no directory entries at all; make sure the
            # parent directory exists before writing
            parent = os.path.dirname(dest)
            if parent and not exists(parent):
                os.makedirs(parent)
            with open(dest, 'wb') as outFile:
                outFile.write(zf.read(name))
+
+
def deprecated_callback(option, opt, value, parser):
    """optparse callback that emits a deprecation warning for 'opt'."""
    Logs.warn('Warning: %s is deprecated' % (opt,))
+
+
def options(opt):
    """Register the build's command-line options: compiler loading, CRT
    selection on Windows, warnings, optimization level, install location
    overrides, and several deprecated no-op flags."""
    opt.load('compiler_cc')
    opt.load('compiler_cxx')
    opt.load('waf_unit_test')

    if sys.version_info >= (2,5,0):
        opt.load('msvs')

    if Options.platform == 'win32':
        opt.load('msvc')
        opt.add_option('--with-crt', action='store', choices=['MD', 'MT'],
                       dest='crt', default='MD', help='Specify Windows CRT library - MT or MD (default)')

    # NOTE: original said "common-separated"; the value is a comma-separated list
    opt.add_option('--packages', action='store', dest='packages',
                   help='Target packages to build (comma-separated list)')
    opt.add_option('--dist-source', action='store_true', dest='dist_source', default=False,
                   help='Distribute source into the installation area (for delivering source)')
    opt.add_option('--disable-warnings', action='store_false', dest='warnings',
                   default=True, help='Disable warnings')
    opt.add_option('--warnings-as-errors', action='store_true', dest='warningsAsErrors',
                   default=False, help='Treat compiler warnings as errors')
    opt.add_option('--enable-debugging', action='store_true', dest='debugging',
                   help='Enable debugging')
    # deprecated flags kept for backwards compatibility; they only warn
    opt.add_option('--enable-cpp11', action='callback', callback=deprecated_callback)
    opt.add_option('--enable-cpp17', action='callback', callback=deprecated_callback)
    opt.add_option('--enable-64bit', action='callback', callback=deprecated_callback)
    opt.add_option('--enable-32bit', action='callback', callback=deprecated_callback)
    opt.add_option('--with-cflags', action='store', nargs=1, dest='cflags',
                   help='Set non-standard CFLAGS', metavar='FLAGS')
    opt.add_option('--with-cxxflags', action='store', nargs=1, dest='cxxflags',
                   help='Set non-standard CXXFLAGS (C++)', metavar='FLAGS')
    opt.add_option('--with-linkflags', action='store', nargs=1, dest='linkflags',
                   help='Set non-standard LINKFLAGS (C/C++)', metavar='FLAGS')
    opt.add_option('--with-defs', action='store', nargs=1, dest='_defs',
                   help='Use DEFS as macro definitions', metavar='DEFS')

    # This approach might not be sustainable as users might want (much) better control
    # over the optimization flags. The "problem" is that different optimization levels
    # can, in particular, cause slight floating-point differences which can break
    # e.g., existing regression tests.
    #
    # For example, GCC has a -Ofast flag which generates "even faster" code at the risk
    # of violating C/C++ standards. There is also on-going research into faster floating-point
    # math, those efforts are slowly making their way into language standards and compilers.
    opt.add_option('--with-optz', action='store',
                   choices=['med', 'fast', 'faster', 'fastest', 'fastest-possible'],
                   default='faster', metavar='OPTZ',
                   help='Specify the optimization level for optimized/release builds')

    opt.add_option('--libs-only', action='store_true', dest='libs_only',
                   help='Only build the libs (skip building the tests, etc.)')
    opt.add_option('--shared', action='store_true', dest='shared_libs',
                   help='Build all libs as shared libs')
    opt.add_option('--disable-symlinks', action='store_false', dest='symlinks',
                   default=True, help='Disable creating symlinks for libs')
    opt.add_option('--no-headers', action='store_false', dest='install_headers',
                   default=True, help='Don\'t install module headers')
    opt.add_option('--no-libs', action='store_false', dest='install_libs',
                   default=True, help='Don\'t install module libraries')
    opt.add_option('--includedir', action='store', nargs=1, dest='includedir',
                   help='Override installation include directory')
    opt.add_option('--libdir', action='store', nargs=1, dest='libdir',
                   help='Override installation lib directory')
    opt.add_option('--bindir', action='store', nargs=1, dest='bindir',
                   help='Override installation bin directory')
    opt.add_option('--sharedir', action='store', nargs=1, dest='sharedir',
                   help='Override installation share directory')
    opt.add_option('--pydir', action='store', nargs=1, dest='pydir',
                   help='Override installation python directory')
    opt.add_option('--install-source', action='store_true', dest='install_source', default=False,
                   help='Distribute source into the installation area (for delivering source)')
    opt.add_option('--with-prebuilt-config', action='store', dest='prebuilt_config',
                   help='Specify a prebuilt modules config file (created from dumpconfig)')
    opt.add_option('--disable-swig-silent-leak', action='store_false', dest='swig_silent_leak',
                   default=True, help='Allow swig to print memory leaks it detects')
    # NOTE: original help text was missing a space between the two string
    # fragments ("unit testresults") and misspelled "formatted"
    opt.add_option('--junit-report', action='store', default=None,
                   help='Generates a junit formatted report file for unit test '
                        'results. NOOP if junit_xml cannot be imported')
+
+
def ensureCpp20Support(self):
    # DEPRECATED.
    # Keeping for now in case downstream code is still looking for it
    # NOTE: despite the function's name, the env key set here is
    # 'cpp11support' — downstream consumers historically key off that name.
    self.env['cpp11support'] = True
+
+
def configureCompilerOptions(self):
    """Populate self.env with compiler/linker flag sets for the detected
    platform/compiler (Apple, gcc/icc on UNIX, msvc on Windows), then apply
    the warning/debug/optimization selections from the command line and
    register the resulting build variant (e.g. 'linux-64').

    NOTE(review): only 64-bit builds are produced; is64Bit is hard-coded
    True near the bottom.
    """
    sys_platform = getPlatform(default=Options.platform)
    appleRegex = r'i.86-apple-.*'
    linuxRegex = r'.*-.*-linux-.*|i686-pc-.*|linux'
    winRegex = r'win32'
    osxRegex = r'darwin'

    cxxCompiler = self.env['COMPILER_CXX']
    ccCompiler = self.env['COMPILER_CC']

    if ccCompiler == 'msvc':
        cxxCompiler = ccCompiler
    else:
        # prefer the versioned gcc-10/g++-10 binaries over plain gcc/g++
        if ccCompiler == 'gcc':
            ccCompiler = 'gcc-10'
            self.env['COMPILER_CC'] =ccCompiler

        if cxxCompiler == 'g++':
            cxxCompiler = 'g++-10'
            self.env['COMPILER_CXX'] = cxxCompiler

    if not cxxCompiler or not ccCompiler:
        self.fatal('Unable to find C/C++ compiler')

    # per-language tables of canned flag sets, keyed by purpose:
    # debug/warn/verbose/64/linkflags_64/optz_* — filled per-platform below
    config = {'cxx':{}, 'cc':{}}

    #apple
    if re.match(appleRegex, sys_platform):
        self.env.append_value('LIB_DL', 'dl')
        self.env.append_value('LIB_NSL', 'nsl')
        self.env.append_value('LINKFLAGS_THREAD', '-pthread')
        self.check_cc(lib='pthread', mandatory=True)

        config['cxx']['debug'] = '-g'
        config['cxx']['warn'] = '-Wall'
        config['cxx']['verbose'] = '-v'
        config['cxx']['64'] = '-m64'
        config['cxx']['optz_debug'] = ''
        config['cxx']['optz_med'] = '-O1'
        config['cxx']['optz_fast'] = '-O2'
        config['cxx']['optz_faster'] = '-O3'
        config['cxx']['optz_fastest'] = config['cxx']['optz_faster']
        config['cxx']['optz_fastest-possible'] = config['cxx']['optz_fastest'] # TODO: -march=native ?

        #self.env.append_value('LINKFLAGS', '-fPIC -dynamiclib'.split())
        self.env.append_value('LINKFLAGS', '-fPIC'.split())
        self.env.append_value('CXXFLAGS', '-fPIC')

        # C flags mirror the C++ flags on Apple platforms
        config['cc']['debug'] = config['cxx']['debug']
        config['cc']['warn'] = config['cxx']['warn']
        config['cc']['verbose'] = config['cxx']['verbose']
        config['cc']['64'] = config['cxx']['64']
        config['cc']['optz_debug'] = config['cxx']['optz_debug']
        config['cc']['optz_med'] = config['cxx']['optz_med']
        config['cc']['optz_fast'] = config['cxx']['optz_fast']
        config['cc']['optz_faster'] = config['cxx']['optz_faster']
        config['cc']['optz_fastest'] = config['cxx']['optz_fastest']
        config['cc']['optz_fastest-possible'] = config['cxx']['optz_fastest-possible']

        self.env.append_value('DEFINES', '_FILE_OFFSET_BITS=64 _LARGEFILE_SOURCE'.split())
        self.env.append_value('CFLAGS', '-fPIC -dynamiclib'.split())

    # GCC / ICC (for Linux or Solaris)
    elif ccCompiler == 'gcc' or ccCompiler == 'gcc-10' or ccCompiler == 'icc':
        if not re.match(winRegex, sys_platform):
            self.env.append_value('LIB_DL', 'dl')
            if not re.match(osxRegex, sys_platform):
                self.env.append_value('LIB_NSL', 'nsl')
        self.env.append_value('LIB_MATH', 'm')
        self.env.append_value('LINKFLAGS_THREAD', '-pthread')
        self.check_cc(lib='pthread', mandatory=True)

        warningFlags = '-Wall'
        if ccCompiler == 'gcc' or ccCompiler == 'gcc-10':
            #warningFlags += ' -Wno-deprecated-declarations -Wold-style-cast'
            warningFlags += ' -Wno-deprecated-declarations'
        else:
            warningFlags += ' -Wno-deprecated'
        if Options.options.warningsAsErrors:
            warningFlags += ' -Wfatal-errors'

        # TODO: Verify there aren't any additional/different Intel compiler
        #       flags to set.  By default, the Intel compiler will link its
        #       libraries in statically for executables but not for plugins.
        #       If you want the plugins to not depend on Intel libraries,
        #       configure with:
        #       --with-cflags=-static-intel --with-cxxflags=-static-intel --with-linkflags=-static-intel
        if cxxCompiler == 'gcc' or cxxCompiler == 'gcc-10':
            config['cxx']['debug'] = '-ggdb3'
            config['cxx']['optz_debug'] = '-Og'
        elif cxxCompiler == 'icpc':
            config['cxx']['debug'] = '-g'
            config['cxx']['optz_debug'] = ''
        if cxxCompiler == 'g++' or cxxCompiler == 'g++-10' or cxxCompiler == 'icpc':
            config['cxx']['warn'] = warningFlags.split()
            config['cxx']['verbose'] = '-v'
            config['cxx']['64'] = '-m64'
            config['cxx']['linkflags_64'] = '-m64'
            config['cxx']['optz_med'] = '-O1'
            config['cxx']['optz_fast'] = '-O2'
            # https://gcc.gnu.org/onlinedocs/gcc-12.2.0/gcc/x86-Options.html#x86-Options
            # "Using -march=native enables all instruction subsets supported by the local machine ..."
            config['cxx']['optz_faster'] = '-O3' # no -march=native
            config['cxx']['optz_fastest'] = config['cxx']['optz_faster'] # TODO: add -march=native ?
            # This "should" be part of fastest, but that could cause unexpected floating point differences.
            # The "fastest-possible" option is new; see comments above.
            config['cxx']['optz_fastest-possible'] = [ config['cxx']['optz_fastest'], '-march=native' ]

            gxxCompileFlags='-fPIC -std=c++2a'
            self.env.append_value('CXXFLAGS', gxxCompileFlags.split())

        # DEFINES and LINKFLAGS will apply to both gcc and g++
        self.env.append_value('DEFINES', '_FILE_OFFSET_BITS=64 _LARGEFILE_SOURCE'.split())

        # TODO: Not sure why, but this flag doesn't work on Solaris
        #       Is there an equivalent to get the same functionality or
        #       is this an OS limitation?
        linkFlags = '-fPIC'
        if (not re.match(osxRegex, sys_platform)):
            linkFlags += ' -Wl,-E'

        self.env.append_value('LINKFLAGS', linkFlags.split())

        if ccCompiler == 'gcc' or ccCompiler == 'gcc-10':
            config['cc']['debug'] = '-ggdb3'
            config['cc']['optz_debug'] = '-Og'
        elif ccCompiler == 'icc':
            config['cc']['debug'] = '-g'
            config['cc']['optz_debug'] = ''
        if ccCompiler == 'gcc' or ccCompiler == 'gcc-10' or ccCompiler == 'icc':
            config['cc']['warn'] = warningFlags.split()
            config['cc']['verbose'] = '-v'
            config['cc']['64'] = '-m64'
            config['cc']['linkflags_64'] = '-m64'
            config['cc']['optz_med'] = '-O1'
            config['cc']['optz_fast'] = '-O2'
            # https://gcc.gnu.org/onlinedocs/gcc-12.2.0/gcc/x86-Options.html#x86-Options
            # "Using -march=native enables all instruction subsets supported by the local machine ..."
            config['cc']['optz_faster'] = '-O3' # no -march=native
            config['cc']['optz_fastest'] = config['cc']['optz_faster'] # TODO: add -march=native ?
            # This "should" be part of fastest, but that could cause unexpected floating point differences.
            # The "fastest-possible" option is new; see comments above.
            config['cc']['optz_fastest-possible'] = [ config['cc']['optz_fastest'], '-march=native' ]

            self.env.append_value('CFLAGS', '-fPIC'.split())
            # "gnu99" enables POSIX and BSD
            self.env.append_value('CFLAGS', '-std=gnu99'.split())

    elif re.match(winRegex, sys_platform):
        crtFlag = '/%s' % Options.options.crt
        crtDebug = '%sd' % crtFlag

        # Sets the size of the stack (in bytes)
        stackFlag = '/STACK:80000000'

        # Skipping warning 4290 about how VS doesn't implement exception
        # specifications properly.
        # Skipping warning 4512 about being unable to generate an assignment
        # operator (since we often do this intentionally).
        # For warnings, use /W4 because /Wall
        # gives us tons of warnings in the VS headers themselves
        warningFlags = '/W4 /wd4290 /wd4512'
        if Options.options.warningsAsErrors:
            warningFlags += ' /WX'

        # MSVC uses one shared flag table for C and C++ (applied to both below)
        vars = {}
        vars['debug'] = ['/Zi', crtDebug]
        if self.env['MSVC_VERSION'] >= 12:
            # Starting with Visual Studio 2013, this is needed to support
            # multiple CL.exe's writing to the common vc*.pdb file
            vars['debug'] += ['/FS']
        vars['warn'] = warningFlags.split()
        vars['nowarn'] = '/w'
        vars['verbose'] = ''
        vars['optz_debug'] = ['', crtFlag]
        vars['optz_med'] = ['-O2', crtFlag]
        vars['optz_fast'] = ['-O2', crtFlag]
        vars['optz_faster'] = vars['optz_fast']
        vars['optz_fastest'] = ['-Ox', crtFlag]
        vars['optz_fastest-possible'] = vars['optz_fastest']
        # The MACHINE flag is probably not actually necessary
        # The linker should be able to infer it from the object files
        # But doing this just to make sure we're really building 32/64 bit
        # applications
        vars['linkflags_64'] = [stackFlag, '/MACHINE:X64']

        if Options.options.debugging:
            # In order to generate a .pdb file, we need both the /Zi
            # compilation flag and the /DEBUG linker flag
            vars['linkflags_64'].append('/DEBUG')
        else:
            # Forcing the linker to not link incrementally. Hoping this will
            # avoid an intermittent race condition we're having where manifest
            # generation fails.
            # Incremental is implied with /DEBUG so no reason to bother
            # setting it there
            vars['linkflags_64'].append('/INCREMENTAL:NO')

        # choose the runtime to link against
        # [/MD /MDd /MT /MTd]

        config['cxx'].update(vars)
        config['cc'].update(vars)

        defines = '_FILE_OFFSET_BITS=64 ' \
                  '_LARGEFILE_SOURCE WIN32 _USE_MATH_DEFINES NOMINMAX WIN32_LEAN_AND_MEAN'.split()
        flags = '/UUNICODE /U_UNICODE /EHs /GR'.split()

        flags.append('/std:c++20')

        self.env.append_value('DEFINES', defines)
        self.env.append_value('CXXFLAGS', flags)
        self.env.append_value('CFLAGS', flags)

    else:
        self.fatal('OS/platform currently unsupported: %s' % sys_platform)

    if re.match(winRegex, sys_platform):
        self.env.append_value('LIB_RPC', 'rpcrt4')
        self.env.append_value('LIB_SOCKET', 'Ws2_32')

    #CXX
    if Options.options.warnings:
        self.env.append_value('CXXFLAGS', config['cxx'].get('warn', ''))
        self.env.append_value('CFLAGS', config['cc'].get('warn', ''))
    else:
        self.env.append_value('CXXFLAGS', config['cxx'].get('nowarn', ''))
        self.env.append_value('CFLAGS', config['cc'].get('nowarn', ''))
    if Options.options.verbose:
        self.env.append_value('CXXFLAGS', config['cxx'].get('verbose', ''))
        self.env.append_value('CFLAGS', config['cc'].get('verbose', ''))


    # We don't really use variants right now, so keep the default environment linked to the variant.
    variant = self.env
    if Options.options.debugging:
        variantName = '%s-debug' % sys_platform
        variant.append_value('CXXFLAGS', config['cxx'].get('debug', ''))
        variant.append_value('CFLAGS', config['cc'].get('debug', ''))
        optz = 'debug'
        variant.append_value('CXXFLAGS', config['cxx'].get('optz_%s' % optz, ''))
        variant.append_value('CFLAGS', config['cc'].get('optz_%s' % optz, ''))
    else:
        variantName = '%s-release' % sys_platform
        optz = Options.options.with_optz
        variant.append_value('CXXFLAGS', config['cxx'].get('optz_%s' % optz, ''))
        variant.append_value('CFLAGS', config['cc'].get('optz_%s' % optz, ''))

    # Check if the system is 64-bit capable
    is64Bit = True
    if re.match(winRegex, sys_platform):
        variantName = variantName.replace('32', '64')
    else:
        variantName = '%s-64' % variantName
    variant.append_value('CXXFLAGS', config['cxx'].get('64', ''))
    variant.append_value('CFLAGS', config['cc'].get('64', ''))
    variant.append_value('LINKFLAGS', config['cc'].get('linkflags_64', ''))

    self.env['IS64BIT'] = is64Bit
    self.all_envs[variantName] = variant
    self.setenv(variantName)

    self.env['VARIANT'] = variant['VARIANT'] = variantName
+
def getConfigFilename(moduleName):
    """Map a dotted module name (e.g. 'sys.os') to its generated config
    header name ('sys_os_config.h')."""
    sanitized = moduleName.replace('.', '_')
    return '%s_config.h' % sanitized
+
def listToTuple(defines):
    """Split a list of 'KEY=VALUE' define strings into a (dict, list) pair:
    a mapping of defined macros and a list of macros to undefine."""
    defined, undefined = {}, []
    for entry in defines:
        parts = entry.split('=')
        key = parts[0]

        # value defaults to a single space when there is no '=value' part
        # (or when there is more than one '=')
        value = parts[1] if len(parts) == 2 else ' '

        # NOTE(review): value is always a string here, so this comparison is
        # always true and 'undefined' stays empty; kept as-is for parity
        # with the original behavior.
        if value != 0:
            defined[key] = value
        else:
            undefined.append(key)
    return defined, undefined
+
def writeConfig(conf, callback, guardTag, infile=None, outfile=None, path=None, feature=None, substDict=None):
    """Generate a configuration header for the module named by guardTag.

    callback(conf) runs inside a derived environment ('<guardTag>_config_env')
    to populate DEFINES.  With feature=None the standard waf config header is
    written; 'handleDefs', 'makeHeader' and 'm4subst' select the matching
    custom generator instead.  substDict is only used by 'm4subst'.
    """
    if path is None:
        # default location: include/<guard tag with dots as path separators>
        path = join('include', guardTag.replace('.', os.sep))
    tempPath = join(str(conf.path.relpath()), path)
    # record where this module's generated headers live for later lookup
    # (see getDriverIncludes)
    conf.env.append_value('header_builddir', guardTag + '=' + tempPath)
    if outfile is None:
        outfile = getConfigFilename(guardTag)
    if feature is None:
        path = join(path,'%s'%outfile)

    # run the callback in a derived env so its DEFINES don't leak into the
    # main configuration
    conf.setenv('%s_config_env'%guardTag, conf.env.derive())
    conf.env['define_key'] = []
    callback(conf)

    bldpath = conf.bldnode.abspath()

    if feature is None:
        conf.write_config_header(configfile=path,
                                 guard='_%s_CONFIG_H_'%guardTag.upper().replace('.', '_'),
                                 top=False, env=None, defines=True,
                                 headers=False, remove=True)
    else:
        # split DEFINES into (defined-macros dict, undefined-macros list)
        tuple = listToTuple(conf.env['DEFINES'])
        defs = tuple[0]
        undefs = tuple[1]

        if feature == 'handleDefs':
            handleDefsFile(input=infile, output=outfile, path=path, defs=defs, conf=conf)
        elif feature == 'makeHeader':
            makeHeaderFile(bldpath, output=outfile, path=path, defs=defs, undefs=undefs, chmod=None,
                           guard='_%s_CONFIG_H_'%guardTag.upper().replace('.', '_'))
        elif feature == 'm4subst':
            m4substFile(input=infile, output=outfile, path=path,
                        dict=substDict, env=conf.env.derive(), chmod=None)

    # restore the default environment
    conf.setenv('')
+
+
def getDriverIncludes(bld, driver):
    """Return the absolute parent directory of the include dir registered for
    'driver' in env['header_builddir'] (entries look like 'name=path').

    Fails the build if zero or more than one entry matches.
    """
    matches = [entry.split('=') for entry in bld.env['header_builddir']
               if entry.startswith(driver)]
    if not matches:
        bld.fatal('Could not find include dir for driver {}'.format(driver))
    if len(matches) != 1:
        bld.fatal('Multiple options for include dir for driver {}'.format(
            driver))

    includeDir = matches[0][1]
    includePathname = os.path.join(bld.bldnode.abspath(), includeDir)
    return os.path.abspath(os.path.dirname(includePathname))
+
def configure(self):
    """Main waf configure step: load compilers (with MSVC workarounds on
    Windows), apply user-supplied flags, run compiler-specific configuration,
    and record install locations and prebuilt-module settings in the env.

    Fixes vs. original: 'INCLDUES' typo (which clobbered any pre-existing
    env['INCLUDES']) and the 'cound'->'could' typo in the MSVC warning.
    """
    if self.env['DETECTED_BUILD_PY']:
        return

    if sys.version_info < (2, 7, 0):
        self.fatal('build.py requires at least Python 2.7')

    sys_platform = getPlatform(default=Options.platform)
    winRegex = r'win32'

    # Utils.to_list splits on whitespace, so multiple entries here means the
    # project path contains spaces (only tolerated on Windows)
    path = Utils.to_list(self.path.abspath())
    if len(path) > 1 and sys_platform != 'win32':
        raise Errors.WafError('Path "%s" contains spaces which cannot be resolved by the system.'%self.path.abspath())

    self.msg('Platform', sys_platform, color='GREEN')

    # Dirty fix to get around libpath problems..
    if 'msvc' in self.options.check_c_compiler and re.match(winRegex, sys_platform):
        # NOTE: Previously there was a workaround here (present until 6f20120)
        # where we overrode cmd_and_log and had it error out if one of the
        # paths in libpath did not exist (Kyle added this in 8cc3578).
        # If you ever have to restore this, there was another spot below
        # where we restored to the old cmd_and_log again.
        # I assume this was to support more of the weird interactions we
        # used to have with the Windows SDK and VS Express, especially
        # when someone tried to build from a vanilla command prompt... I
        # think this logic forced waf not to pick that configuration.
        # The problem is that, with Visual Studio 2015 Express, you end
        # up with incomplete stuff in LIB because you have incomplete
        # stuff in LIBPATH (and waf-print-msvc.bat mashes those together)
        # because vcvarsx86_amd64.bat, when it doesn't have WindowsSdkDir,
        # sets LIBPATH to include WindowsLibPath (which is set to an
        # incomplete path by vcvarsqueryregistry.bat) and ExtensionSDKDir
        # (which is an empty path for me... would potentially be set by
        # vcvarsqueryregistry.bat in certain cases). This is ok - waf
        # will find and use VS 2015 Express fine if you just leave it
        # alone. It's possible this is going to break some old versions
        # of VS Express and/or Windows SDK, but the upside is that
        # starting with VS 2012 Express, it ships with a 64-bit
        # cross-compiler so hopefully these have largely faded out.
        # I'm wondering if this also explains other weirdness I'd seen
        # in the past where waf, with a VS Pro installation, wouldn't pick
        # the real x64 target sometimes (we used to have to prefer
        # x86_amd64 over x64 in MSVC_TARGETS to work around that).

        # If we're in the Windows SDK or VS command prompt, having these set can mess things up.
        env_lib = self.environ.get('LIB', None)
        if 'LIB' in self.environ: del self.environ['LIB']
        env_cl = os.environ.get('CL', None)
        if 'CL' in os.environ: del os.environ['CL']

        # x64 is the native 64-bit compiler, so prefer this one. If we
        # just have VS Express though, we won't have it, so fall back on
        # x86_amd64 - this is a 32-bit compiler that cross-compiles to
        # 64-bit. VS 2012 Express ships with this one, and earlier VS
        # Express versions can get this via the Windows SDK.
        self.env['MSVC_TARGETS'] = ['x64', 'x86_amd64']

        # Look for 32-bit msvc if we don't find 64-bit.
        self.options.check_c_compiler = self.options.check_cxx_compiler = 'msvc'
        try:
            self.load('compiler_c')
        except self.errors.ConfigurationError:
            self.env['MSVC_TARGETS'] = None
            self.tool_cache.remove(('msvc',id(self.env),None))
            self.tool_cache.remove(('compiler_c',id(self.env),None))
            self.msg('Checking for \'msvc\'', 'Warning: could not find x64 msvc, looking for others', color='RED')

    self.load('compiler_c')
    self.load('compiler_cxx')
    self.load('waf_unit_test')

    # Reset LIB and CL
    if 'msvc' in self.options.check_c_compiler and re.match(winRegex, sys_platform):
        if env_lib is not None: self.environ['LIB'] = env_lib
        if env_cl is not None: os.environ['CL'] = env_cl

    # NOTE: The order is important here. We need to set up all
    # compiler-specific flags (via both the options immediately below
    # and configureCompilerOptions()) before we check for any functions
    # or types in case these flags have an impact (most noticeably this
    # will impact 32 vs. 64-bit specific checks like sizeof(size_t))
    env = self.env
    if Options.options.cxxflags:
        env.append_unique('CXXFLAGS', Options.options.cxxflags.split())
    if Options.options.cflags:
        env.append_unique('CFLAGS', Options.options.cflags.split())
    if Options.options.linkflags:
        env.append_unique('LINKFLAGS', Options.options.linkflags.split())
    if Options.options._defs:
        env.append_unique('DEFINES', Options.options._defs.split(','))
    configureCompilerOptions(self)
    ensureCpp20Support(self)

    env['PLATFORM'] = sys_platform

    env['LIB_TYPE'] = Options.options.shared_libs and 'shlib' or 'stlib'
    env['declspec_decoration'] = ''
    env['windows_dll'] = False
    if Options.options.shared_libs and env['COMPILER_CXX'] == 'msvc':
        env['declspec_decoration'] = '__declspec(dllexport)'
        env['windows_dll'] = True

    env['install_headers'] = Options.options.install_headers
    env['install_libs'] = Options.options.install_libs
    env['install_source'] = Options.options.install_source

    env['install_includedir'] = Options.options.includedir if Options.options.includedir else join(Options.options.prefix, 'include')
    env['install_libdir'] = Options.options.libdir if Options.options.libdir else join(Options.options.prefix, 'lib')
    env['install_bindir'] = Options.options.bindir if Options.options.bindir else join(Options.options.prefix, 'bin')
    env['install_sharedir'] = Options.options.sharedir if Options.options.sharedir else join(Options.options.prefix, 'share')
    env['install_pydir'] = Options.options.pydir if Options.options.pydir else '${PYTHONDIR}'

    # Swig memory leak output
    if Options.options.swig_silent_leak:
        env['DEFINES'].append('SWIG_PYTHON_SILENT_MEMLEAK')

    # Look for prebuilt modules
    if Options.options.prebuilt_config:
        with open(Options.options.prebuilt_config) as f:
            fileContents = f.readlines()

        # BUG FIX: was "env['INCLDUES']" — the misspelled key is always
        # empty, so the check always succeeded and reset env['INCLUDES']
        if not env['INCLUDES']:
            env['INCLUDES'] = []

        env['INCLUDES'].append(fileContents[0].rstrip())
        env.append_unique('CXXFLAGS', fileContents[1].rstrip().split())
        env['LIB_PREBUILT'] = fileContents[2].rstrip().split()
        env['LIBPATH_PREBUILT'] = fileContents[3].rstrip()

    #flag that we already detected
    self.env['DETECTED_BUILD_PY'] = True
+
@TaskGen.feature('swig_linkage')
@TaskGen.after_method('process_use')
def process_swig_linkage(tsk):
    """Adjust linker arguments for SWIG-generated python extension modules:
    link sibling python wrapper libs by explicit path, set an soname, and
    bake relative rpaths so no LD_LIBRARY_PATH is needed at runtime.

    No-op under MSVC."""

    # first we need to setup some platform specific
    # options for specifying soname and passing linker
    # flags

    darwinRegex = r'i.86-apple-.*'
    osxRegex = r'darwin'

    platform = getPlatform(default=Options.platform)
    compiler = tsk.env['COMPILER_CXX']

    if compiler == 'msvc':
        # TODO
        # MSVC doesn't need this feature, apparently
        # Not sure if cygwin/mingw does or not...
        return

    # TODO: Here we're using -Wl,_foo.so since if you just use -l_foo the linker
    #       assumes there's a 'lib' prefix in the filename which we don't have
    #       here. Instead, according to the ld man page, may be able to prepend
    #       a colon and do this instead: -l:_foo.so (not sure if this works with
    #       ld version <= 2.17)
    libpattern = tsk.env['cshlib_PATTERN']
    linkarg_pattern = '-Wl,%s'
    rpath_pattern = '-Wl,-rpath=%s'
    soname_pattern = '-soname=%s'

    # overrides for osx
    if re.match(darwinRegex,platform) or re.match(osxRegex,platform):
        # OSX builds dylibs, not bundles, for these modules
        while '-bundle' in tsk.env.LINKFLAGS:
            tsk.env.LINKFLAGS.remove('-bundle')
        tsk.env.LINKFLAGS.append('-dynamiclib')
        soname_pattern='-install_name,%s'
        rpath_pattern='-Wl,-rpath,%s'

    # so swig can find .i files to import
    incstr = ''
    for nod in tsk.includes:
        incstr += ' -I' + nod.abspath()
    if hasattr(tsk,'swig_flags'):
        tsk.swig_flags = tsk.swig_flags + incstr

    # Search for python libraries and
    # add the target files explicitly as command line parameters for linking
    package_list = []
    newlib = []
    for lib in tsk.env.LIB:

        # get our library name so we
        # can extract it's path from LIBPATH
        # libname is the filename we'll be linking to
        # searchstr is the module name
        if lib.startswith('_coda_'):
            libname = libpattern % lib
            searchstr = lib[6:].replace('_','.')
        elif lib.startswith('_'):
            libname = libpattern % lib
            searchstr = lib[1:].replace('_','.')
        else:
            # this isn't a python library, ignore it
            newlib.append(lib)
            continue

        if searchstr.endswith(".base"):
            searchstr = searchstr[:-5]

        dep_path = os.path.basename(tsk.bld.get_tgen_by_name(searchstr + '-python').install_path)
        package_list.append(dep_path)

        # Python wrappers have the same module name as their associated
        # C++ modules so if waf is configured with --shared searching through
        # LIBPATH for our module name is not sufficient to find the *python* module
        # TODO: find some way to tell the C++ and python shared libs apart without
        #       forcing our python modules to be in a folder called 'python'
        searchstr = os.path.join('python',searchstr)

        # search for a module with a matching name
        libpath = ''
        for libdir in tsk.env.LIBPATH:
            if libdir.endswith(searchstr):
                libpath = libdir
        libpath = os.path.join(str(libpath), libname)

        # finally add the path to the referenced python library
        tsk.env.LINKFLAGS.append(libpath)

    # We need to explicitly set our soname otherwise modules that
    # link to *us* in the above fashion will not be able to do it
    # without the same path
    # (ie python dependencies at runtime after installation)

    # This builds a soname_str suitable for hopefully any platform,
    # compiler, and linker
    soname_str = linkarg_pattern % (soname_pattern % (libpattern % tsk.target))
    tsk.env.LINKFLAGS.append(soname_str)

    # finally, we want to bake the library search paths straight in to
    # our python extensions so we don't need to set an LD_LIBRARY_PATH
    package_set = set(package_list)
    base_path = os.path.join(':${ORIGIN}', '..')
    dirlist = ''.join(str(os.path.join(base_path,s)) for s in package_set)
    if dirlist:
        rpath_str = rpath_pattern % (dirlist)
        tsk.env.LINKFLAGS.append(rpath_str)

    # newlib is now a list of our non-python libraries
    tsk.env.LIB = newlib
+
+
+#
+# This task generator creates tasks that install an __init__.py
+# for our python packages. Right now all it does it create an
+# empty __init__.py in the install directory but if we decide
+# to go farther with our python bindings (ie, we have more than a
+# couple packages or need to actually put stuff in the __init__.py)
+# they will go here
+#
+
@task_gen
@feature('python_package')
def python_package(tg):
    # Create (or touch) an empty __init__.py for a python package in the
    # build directory and schedule it for installation into tg.install_path.

    # make sure we actually need to install stuff
    if not 'install' in tg.bld.cmd:
        return

    # setup some paths
    # we'll create our __init__.py right in our build directory
    install_path = tg.install_path
    pkg_name = os.path.join('packages', os.path.basename(install_path))
    install_path = install_path.replace('${PYTHONDIR}',tg.env.PYTHONDIR)
    dirname = os.path.join(tg.bld.bldnode.abspath(), pkg_name)
    fname = os.path.join(tg.bld.bldnode.abspath(), pkg_name, tg.target)

    #mk the build dir if it doesn't exist
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    # append to file or create
    if not os.path.isfile(fname):
        open(fname,'a').close()

    # to install files the 'node' associated with the file
    # needs to have a signature; the hash of the file is
    # good enough for us.
    relpath = os.path.join(pkg_name, tg.target)
    nod = tg.bld.bldnode.make_node(relpath)
    nod.sig = h_file(fname)

    # schedule the file for installation
    tg.bld.install_files(install_path,nod)
+
@task_gen
@feature('untar')
def untar(tsk):
    # Task generator feature: extract the tar archive tsk.fname in tsk.path.
    # NOTE(review): delegates to untarDriver, which is not visible in this
    # chunk — presumably defined elsewhere in this file; verify.
    untarDriver(tsk.path, tsk.fname)
+
def untarFile(path, fname, mode='r'):
    """Extract the tar archive 'fname' (a node under 'path') into 'path'.

    'mode' is passed straight through to tarfile.open (e.g. 'r', 'r:gz').
    The archive is opened with a context manager so the handle is closed
    even if an extraction step raises (the original leaked it on error).
    """
    import tarfile
    f = path.find_or_declare(fname)
    p = path.abspath()
    with tarfile.open(f.abspath(), mode) as tf:
        for x in tf:
            tf.extract(x, p)
+
@task_gen
@feature('unzip')
def unzip(tsk):
    # Task generator feature: extract the zip archive tsk.fname into the
    # task's directory using the module-level unzipper() helper.
    f = tsk.path.find_or_declare(tsk.fname)
    unzipper(f.abspath(), tsk.path.abspath())
+
+# Needed to install files when using --target
@task_gen
@feature('install_tgt')
def install_tgt(tsk):
    # Install files from tsk.dir into tsk.install_path: everything matched by
    # the ant glob(s) in tsk.pattern plus the explicit names in tsk.files.
    # When copy_to_source_dir is set (and env['install_source'] is enabled),
    # each file is additionally mirrored into ${PREFIX}/source.
    if os.path.exists(tsk.dir.abspath()):
        # copy_to_source_dir is forced off unless BOTH the attribute is
        # present and env['install_source'] is truthy
        if not hasattr(tsk, 'copy_to_source_dir') or not tsk.env['install_source']:
            tsk.copy_to_source_dir = False
        if not hasattr(tsk, 'pattern'):
            tsk.pattern = []
        if not hasattr(tsk, 'relative_trick'):
            tsk.relative_trick = False
        if isinstance(tsk.pattern, str):
            tsk.pattern = [tsk.pattern]
        for pattern in tsk.pattern:
            for file in tsk.dir.ant_glob(pattern):
                # NOTE(review): with relative_trick the subtree layout is
                # recreated by install_files itself; otherwise the parent
                # path is appended to the destination explicitly
                if tsk.relative_trick:
                    dest = tsk.install_path
                else:
                    dest = os.path.join(tsk.install_path, file.parent.path_from(tsk.dir))
                inst = tsk.bld.install_files(dest, file,
                        relative_trick=tsk.relative_trick)
                if inst and hasattr(tsk, 'chmod'):
                    inst.chmod = tsk.chmod

                if tsk.copy_to_source_dir:
                    dest = os.path.join('${PREFIX}', 'source')
                    inst2 = tsk.bld.install_files(dest, file,
                            relative_trick=True)
                    if inst2 and hasattr(tsk, 'chmod'):
                        inst2.chmod = tsk.chmod
        if not hasattr(tsk, 'files'):
            tsk.files = []
        if isinstance(tsk.files, str):
            tsk.files = [tsk.files]
        for file in tsk.files:
            inst = tsk.bld.install_files(tsk.install_path, tsk.dir.make_node(file),
                    relative_trick=tsk.relative_trick)
            if inst and hasattr(tsk, 'chmod'):
                inst.chmod = tsk.chmod

            if tsk.copy_to_source_dir:
                dest = os.path.join('${PREFIX}', 'source')
                inst2 = tsk.bld.install_files(dest, tsk.dir.make_node(file),
                        relative_trick=True)
                if inst2 and hasattr(tsk, 'chmod'):
                    inst2.chmod = tsk.chmod
+
@task_gen
@feature('copytree_tgt')
def copytree_tgt(tsk):
    """Replace the directory tree at tsk.dest (after env variable
    substitution) with a copy of tsk.src.  Optional attributes 'symlinks'
    and 'ignore' are forwarded to shutil.copytree."""
    target = Utils.subst_vars(tsk.dest, tsk.bld.env)
    # clear out any previous copy; ignore_errors=True tolerates a missing dest
    shutil.rmtree(target, True)
    shutil.copytree(tsk.src,
                    target,
                    getattr(tsk, 'symlinks', False),
                    getattr(tsk, 'ignore', None))
+
+@task_gen
+@feature('install_as_tgt')
+def install_as_tgt(tsk):
+    # Install a single file under an explicit destination name
+    # (thin wrapper over BuildContext.install_as).
+    tsk.bld.install_as(tsk.install_as, tsk.file, cwd=tsk.dir)
+
+@task_gen
+@feature('symlink_as_tgt')
+def symlink_as_tgt(tsk):
+    # Create a symlink at tsk.dest pointing to tsk.src at install time
+    # (thin wrapper over BuildContext.symlink_as).
+    tsk.bld.symlink_as(tsk.dest, tsk.src)
+
+# Allows a target to specify additonal targets to be executed.
+@task_gen
+@feature('add_targets')
+def add_targets(self):
+ if isinstance(self.targets_to_add, str):
+ self.targets_to_add = [self.targets_to_add]
+ for target in self.targets_to_add:
+ if isinstance(target, task_gen):
+ target.post()
+ else:
+ self.bld.get_tgen_by_name(target).post()
+
+# When building a DLL, don't install the implib.
+@task_gen
+@feature('no_implib')
+@after('apply_implib')
+def no_implib(tsk):
+    # Neutralize the import-library install task (if one was created by
+    # apply_implib) by replacing its executor with a no-op.
+    if getattr(tsk, 'implib_install_task', None):
+        tsk.implib_install_task.exec_task = Utils.nada
+
+@task_gen
+@feature('m4subst')
+def m4subst(tsk):
+    # Task-generator entry point for @TOKEN@ substitution; all real work
+    # is done by the module-level m4substFile() below.
+    m4substFile(input=tsk.input, output=tsk.output, path=tsk.path, dict=tsk.dict, env=tsk.env, chmod=getattr(tsk, 'chmod', None))
+
+def m4substFile(input, output, path, dict={}, env=None, chmod=None):
+ import re
+ #similar to the subst in misc.py - but outputs to the src directory
+ m4_re = re.compile('@(\w+)@', re.M)
+
+ infile = join(path.abspath(), input)
+ dir = path.relpath()
+ outfile = join(dir, output)
+
+ if not os.path.exists(dir):
+ os.makedirs(dir)
+
+ file = open(infile, 'r')
+ code = file.read()
+ file.close()
+
+ # replace all % by %% to prevent errors by % signs in the input file while string formatting
+ code = code.replace('%', '%%')
+
+ s = m4_re.sub(r'%(\1)s', code)
+
+ if not dict:
+ names = m4_re.findall(code)
+ for i in names:
+ dict[i] = env.get_flat(i) or env.get_flat(i.upper())
+
+ file = open(outfile, 'w')
+ file.write(s % dict)
+ file.close()
+ if chmod: os.chmod(outfile, chmod)
+
+@task_gen
+@feature('handleDefs')
+def handleDefs(tsk):
+    # Task-generator entry point for config-header rewriting; all real
+    # work is done by the module-level handleDefsFile() below.
+    handleDefsFile(input=tsk.input, output=tsk.output, path=tsk.path, defs=tsk.defs, chmod=getattr(tsk, 'chmod', None))
+
+def handleDefsFile(input, output, path, defs, chmod=None, conf=None):
+ import re
+ infile = join(path.abspath(), input)
+ outfile = join(path.abspath(), output)
+
+ file = open(infile, 'r')
+ code = file.read()
+ file.close()
+
+ for k in list(defs.keys()):
+ v = defs[k]
+ if v is None:
+ v = ''
+ code = re.sub(r'#undef %s(\s*\n)' % k,
+ lambda x: '#define %s %s\n' % (k,v), code)
+ code = re.sub(r'#define %s 0(\s*\n)' % k,
+ lambda x: '#define %s %s\n' % (k,v), code)
+
+ # comment out remaining #undef lines
+ code = re.sub(r'(#undef[^\n\/\**]*)(\/\*.+\*\/)?(\n)',
+ r'/* \1 */\3', code)
+ file = open(outfile, 'w')
+ file.write(code)
+ file.close()
+ if chmod: os.chmod(outfile, chmod)
+
+@task_gen
+@feature('makeHeader')
+def makeHeader(tsk):
+    # Task-generator entry point for generating a config header.
+    # NOTE(review): makeHeaderFile() declares 'bldpath' as its first
+    # parameter but this keyword call never passes it — as written this
+    # raises TypeError at runtime; confirm against makeHeaderFile's
+    # signature.
+    makeHeaderFile(output=tsk.output, path=tsk.path, defs=tsk.defs,
+                   undefs=getattr(tsk, 'undefs', None),
+                   chmod=getattr(tsk, 'chmod', None),
+                   guard=getattr(tsk, 'guard', '__CONFIG_H__'))
+
+def makeHeaderFile(bldpath, output, path, defs, undefs, chmod, guard):
+ outfile = join(path.abspath(), output)
+ dest = open(outfile, 'w')
+ dest.write('#ifndef %s\n#define %s\n\n' % (guard, guard))
+
+ # Prevent the following from making it into a config header
+ toRemove = ['PYTHONDIR', 'PYTHONARCHDIR', 'NOMINMAX', '_SCL_SECURE_NO_WARNINGS', \
+ '_CRT_SECURE_NO_WARNINGS', 'WIN32_LEAN_AND_MEAN', 'WIN32', 'NOMINMAX', \
+ '_FILE_OFFSET_BITS', '_LARGEFILE_SOURCE']
+ for item in toRemove:
+ if item in defs:
+ del defs[item]
+
+ for k in list(defs.keys()):
+ v = defs[k]
+ if v is None:
+ v = ''
+ dest.write('\n#ifndef %s\n#define %s %s\n#endif\n' % (k, k, v))
+
+ if undefs:
+ for u in undefs:
+ dest.write('\n#undef %s\n' % u)
+
+ dest.write('\n#endif /* %s */\n' % guard)
+ dest.close()
+ if chmod: os.chmod(outfile, chmod)
+
+def getSolarisFlags(compilerName):
+ # Newer Solaris compilers use -m32 and -m64, so check to see if these flags exist
+ # If they don't, default to the old -xtarget flags
+ # TODO: Is there a cleaner way to do this with check_cc() instead?
+ bitFlag64 = '-xtarget=generic64'
+ (out, err) = subprocess.Popen([compilerName, '-flags'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True).communicate()
+
+ for line in out.split('\n'):
+ if re.match(r'-m64.*', line):
+ bitFlag64 = '-m64'
+
+ return bitFlag64
+
+
+def getWscriptTargets(bld, env, path):
+ # Here we're taking a look at the current stack and adding on all the
+ # wscript's that got us to this point.
+ # The main additional difficulty here is that we can't just add these
+ # pathnames on because then the same file will be marked to be installed by
+ # multiple other targets (i.e. modules/c++/A and modules/c++/B will both
+ # try to install modules/c++/wscript). The way around this is to create
+ # a named target for all these wscript's based on their full pathname.
+ wscriptTargets = []
+ for item in inspect.stack():
+ pathname = item[1]
+ filename = os.path.basename(pathname)
+ if filename in ['wscript', 'waf']:
+ try:
+ # If this succeeds, somebody else has already made this target
+ # for us
+ wscriptTargets.append(bld.get_tgen_by_name(pathname))
+ except:
+ # If we got here, no one has made the target yet, so we'll
+ # make it
+ # TODO: Not sure if there's a more efficient way to set
+ # dir below
+ dirname = os.path.dirname(pathname)
+ relpath = os.path.relpath(dirname, path.abspath())
+ wscriptTargets.append(bld(features='install_tgt',
+ target=pathname,
+ pattern=['wscript', 'waf', '*.py', 'include/**/*', 'config.guess'],
+ dir=path.make_node(relpath),
+ install_path='${PREFIX}/source',
+ relative_trick=True))
+ return wscriptTargets
+
+def addSourceTargets(bld, env, path, target):
+ if env['install_source']:
+ wscriptTargets = getWscriptTargets(bld, env, path)
+
+ # We don't ever go into the project.cfg files, we call fromConfig on
+ # them from a wscript, so we have to call these out separately.
+ for targetName in path.ant_glob(['project.cfg']):
+ try:
+ wscriptTargets.append(bld.get_tgen_by_name(targetName))
+ except:
+ wscriptTargets.append(bld(features='install_tgt',
+ target=targetName,
+ files = 'project.cfg',
+ pattern=['include/**/*'],
+ dir=path, install_path='${PREFIX}/source',
+ relative_trick=True))
+
+ source = []
+ for file in target.source:
+ if type(file) is str:
+ source.append(file)
+ else:
+ source.append(file.path_from(path))
+
+ target.targets_to_add.append(bld(features='install_tgt',
+ files = source,
+ dir=path, install_path='${PREFIX}/source',
+ relative_trick=True))
+
+ target.targets_to_add += wscriptTargets
+
+def junitUnitTestResults(bld):
+    '''
+    Summary callback function to generate JUnit formatted XML from the
+    waf unit-test results, written to the path given by
+    bld.options.junit_report.  Requires the third-party junit_xml package.
+    '''
+    import junit_xml
+
+    # we also want a logged summary still
+    waf_unit_test.summary(bld)
+
+    # now generate a report
+    lst = getattr(bld,'utest_results',[])
+    test_cases = []
+    for name, retcode, stdout, stderr in lst:
+        # utest results carry raw bytes; decode for the XML report
+        so = stdout.decode()
+        se = stderr.decode()
+        tc = junit_xml.TestCase(name=name,
+                                status=retcode,
+                                stdout=so,
+                                stderr=se)
+        if retcode:
+            # non-zero return: harvest 'FAILED' lines from stderr as the
+            # failure message, falling back to a generic message
+            messages = []
+            lines = se.split('\n')
+            for line in lines:
+                if 'FAILED' in line:
+                    messages.append(line)
+            if len(messages) == 0:
+                messages = ['Unknown error occured that caused non-zero return code']
+
+            tc.add_failure_info('\n'.join(messages))
+        test_cases.append(tc)
+    ts = junit_xml.TestSuite('unit tests', test_cases)
+    with open(bld.options.junit_report, 'w') as fh:
+        fh.write(junit_xml.TestSuite.to_xml_string([ts]))
+
+
+def enableWafUnitTests(bld, set_exit_code=True):
+ """
+ If called, run all C++ unit tests after building
+ :param set_exit_code Flag to set a non-zero exit code if a unit test fails
+ """
+ # TODO: This does not work for Python files.
+ # The "nice" way to handle this is possibly not
+ # supported in this version of Waf.
+ if bld.options.junit_report is not None:
+ try:
+ import junit_xml
+ bld.add_post_fun(junitUnitTestResults)
+ except ImportError:
+ Logs.pprint('RED', 'Cannot generate requested junit report because we can\'t import junit-xml')
+ bld.add_post_fun(waf_unit_test.summary)
+ else:
+ bld.add_post_fun(waf_unit_test.summary)
+
+ if set_exit_code:
+ bld.add_post_fun(waf_unit_test.set_exit_code)
+
+
+class SwitchContext(Context.Context):
+ """
+ Easily switch output directories without reconfiguration.
+ """
+ cmd='switch'
+ def __init__(self,**kw):
+ super(SwitchContext,self).__init__(**kw)
+ def execute(self):
+ out_lock = self.path.make_node(Options.options.out).make_node(Options.lockfile)
+ root_lock = self.path.make_node(Options.lockfile)
+ if exists(out_lock.abspath()):
+ shutil.copy2(out_lock.abspath(), root_lock.abspath())
+ else:
+ raise Errors.WafError('Out directory "%s" not configured.'%Options.options.out)
+
+# Mixin CPPContext into each of waf's standard command contexts so every
+# command (build/list/clean/install/configure/options) gains the project's
+# shared helpers.
+class CPPBuildContext(BuildContext, CPPContext):
+    pass
+class CPPListContext(ListContext, CPPContext):
+    pass
+class CPPCleanContext(CleanContext, CPPContext):
+    pass
+class CPPInstallContext(InstallContext, CPPContext):
+    pass
+class CPPConfigurationContext(ConfigurationContext, CPPContext):
+    pass
+class CPPOptionsContext(OptionsContext, CPPContext):
+    pass
+
+# distclean has no stock context class to subclass, so declare a minimal
+# one bound to the 'distclean' command, then mix in CPPContext.
+class CPPBaseDistcleanContext(Context.Context):
+    cmd='distclean'
+    def __init__(self,**kw):
+        super(CPPBaseDistcleanContext,self).__init__(**kw)
+class CPPDistcleanContext(CPPBaseDistcleanContext, CPPContext):
+    pass
+
+# The remaining contexts wrap waf extra tools (msvs, eclipse, dumpenv, ...)
+# with CPPContext mixed in; each sets self.waf_command so generated project
+# files / scripts invoke waf the same way the user does.
+class CPPMSVSGenContext(msvs_generator, CPPContext):
+    def __init__(self, **kw):
+        self.waf_command = 'python waf'
+        super(CPPMSVSGenContext, self).__init__(**kw)
+
+class CPPEclipseGenContext(eclipse, CPPContext):
+    def __init__(self, **kw):
+        self.waf_command = 'python waf'
+        super(CPPEclipseGenContext, self).__init__(**kw)
+
+class CPPDumpEnvContext(dumpenv, CPPContext):
+    def __init__(self, **kw):
+        self.waf_command = 'python waf'
+        super(CPPDumpEnvContext, self).__init__(**kw)
+
+class CPPDumpLibContext(dumplib, CPPContext):
+    def __init__(self, **kw):
+        self.waf_command = 'python waf'
+        super(CPPDumpLibContext, self).__init__(**kw)
+
+class CPPDumpLibRawContext(dumplibraw, CPPContext):
+    def __init__(self, **kw):
+        # note: 'raw' variant passes an extra argument to waf
+        self.waf_command = 'python waf raw'
+        super(CPPDumpLibRawContext, self).__init__(**kw)
+
+class CPPDumpConfigContext(dumpconfig, CPPContext):
+    def __init__(self, **kw):
+        self.waf_command = 'python waf'
+        super(CPPDumpConfigContext, self).__init__(**kw)
+
+class CPPMakeWheelContext(makewheel, CPPContext):
+    def __init__(self, **kw):
+        self.waf_command = 'python waf'
+        super(CPPMakeWheelContext, self).__init__(**kw)
+
+class CPPPackageContext(package, CPPContext):
+    def __init__(self, **kw):
+        self.waf_command = 'python waf'
+        super(CPPPackageContext, self).__init__(**kw)
diff --git a/externals/nitro/externals/coda-oss/build/config.guess b/externals/nitro/externals/coda-oss/build/config.guess
new file mode 100755
index 000000000..30646a1ad
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/build/config.guess
@@ -0,0 +1,1501 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation,
+# Inc.
+
+timestamp='2006-11-08'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
+# 02110-1301, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+
+# Originally written by Per Bothner .
+# Please send patches to . Submit a context
+# diff and a properly formatted ChangeLog entry.
+#
+# This script attempts to guess a canonical system name similar to
+# config.sub. If it succeeds, it prints the system name on stdout, and
+# exits with 0. Otherwise, it exits with 1.
+#
+# The plan is that this can be called by configure scripts if you
+# don't specify an explicit build system type.
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to ."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+# (vendored GNU config.guess: only -h/-t/-v style options are accepted;
+# any positional argument is an error)
+while test $# -gt 0 ; do
+  case $1 in
+    --time-stamp | --time* | -t )
+       echo "$timestamp" ; exit ;;
+    --version | -v )
+       echo "$version" ; exit ;;
+    --help | --h* | -h )
+       echo "$usage"; exit ;;
+    -- )     # Stop option processing
+       shift; break ;;
+    - )	# Use stdin as input.
+       break ;;
+    -* )
+       echo "$me: invalid option $1$help" >&2
+       exit 1 ;;
+    * )
+       break ;;
+  esac
+done
+
+if test $# != 0; then
+  echo "$me: too many arguments$help" >&2
+  exit 1
+fi
+
+# exit non-zero on HUP/INT/TERM
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# Portable tmp directory creation inspired by the Autoconf team.
+
+# NOTE(review): this vendored copy differs from upstream config.guess by
+# probing gcc-10 before gcc in the compiler search list below.
+set_cc_for_build='
+trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
+trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
+: ${TMPDIR=/tmp} ;
+ { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+ { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
+ { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+ { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
+dummy=$tmp/dummy ;
+tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,)    echo "int x;" > $dummy.c ;
+	for c in gcc-10 gcc c99 c89 cc; do
+	  if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
+	     CC_FOR_BUILD="$c"; break ;
+	  fi ;
+	done ;
+	if test x"$CC_FOR_BUILD" = x ; then
+	  CC_FOR_BUILD=no_compiler_found ;
+	fi
+	;;
+ ,,*)   CC_FOR_BUILD=$CC ;;
+ ,*,*)  CC_FOR_BUILD=$HOST_CC ;;
+esac ; set_cc_for_build= ;'
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+	PATH=$PATH:/.attbin ; export PATH
+fi
+
+# probe the four uname facets used by the big dispatch case below
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+ *:NetBSD:*:*)
+ # NetBSD (nbsd) targets should (where applicable) match one or
+ # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
+ # switched to ELF, *-*-netbsd* would select the old
+ # object file format. This provides both forward
+ # compatibility and a consistent mechanism for selecting the
+ # object file format.
+ #
+ # Note: NetBSD doesn't particularly care about the vendor
+ # portion of the name. We always set it to "unknown".
+ sysctl="sysctl -n hw.machine_arch"
+ UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
+ /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+ case "${UNAME_MACHINE_ARCH}" in
+ armeb) machine=armeb-unknown ;;
+ arm*) machine=arm-unknown ;;
+ sh3el) machine=shl-unknown ;;
+ sh3eb) machine=sh-unknown ;;
+ sh5el) machine=sh5le-unknown ;;
+ *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+ esac
+ # The Operating System including object format, if it has switched
+ # to ELF recently, or will in the future.
+ case "${UNAME_MACHINE_ARCH}" in
+ arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+ eval $set_cc_for_build
+ if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep __ELF__ >/dev/null
+ then
+ # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+ # Return netbsd for either. FIX?
+ os=netbsd
+ else
+ os=netbsdelf
+ fi
+ ;;
+ *)
+ os=netbsd
+ ;;
+ esac
+ # The OS release
+ # Debian GNU/NetBSD machines have a different userland, and
+ # thus, need a distinct triplet. However, they do not need
+ # kernel version information, so it can be replaced with a
+ # suitable tag, in the style of linux-gnu.
+ case "${UNAME_VERSION}" in
+ Debian*)
+ release='-gnu'
+ ;;
+ *)
+ release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+ ;;
+ esac
+ # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+ # contains redundant information, the shorter form:
+ # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+ echo "${machine}-${os}${release}"
+ exit ;;
+ *:OpenBSD:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+ exit ;;
+ *:ekkoBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
+ exit ;;
+ *:SolidBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
+ exit ;;
+ macppc:MirBSD:*:*)
+ echo powerpc-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ *:MirBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ alpha:OSF1:*:*)
+ case $UNAME_RELEASE in
+ *4.0)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+ ;;
+ *5.*)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+ ;;
+ esac
+ # According to Compaq, /usr/sbin/psrinfo has been available on
+ # OSF/1 and Tru64 systems produced since 1995. I hope that
+ # covers most systems running today. This code pipes the CPU
+ # types through head -n 1, so we only detect the type of CPU 0.
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
+ case "$ALPHA_CPU_TYPE" in
+ "EV4 (21064)")
+ UNAME_MACHINE="alpha" ;;
+ "EV4.5 (21064)")
+ UNAME_MACHINE="alpha" ;;
+ "LCA4 (21066/21068)")
+ UNAME_MACHINE="alpha" ;;
+ "EV5 (21164)")
+ UNAME_MACHINE="alphaev5" ;;
+ "EV5.6 (21164A)")
+ UNAME_MACHINE="alphaev56" ;;
+ "EV5.6 (21164PC)")
+ UNAME_MACHINE="alphapca56" ;;
+ "EV5.7 (21164PC)")
+ UNAME_MACHINE="alphapca57" ;;
+ "EV6 (21264)")
+ UNAME_MACHINE="alphaev6" ;;
+ "EV6.7 (21264A)")
+ UNAME_MACHINE="alphaev67" ;;
+ "EV6.8CB (21264C)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.8AL (21264B)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.8CX (21264D)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.9A (21264/EV69A)")
+ UNAME_MACHINE="alphaev69" ;;
+ "EV7 (21364)")
+ UNAME_MACHINE="alphaev7" ;;
+ "EV7.9 (21364A)")
+ UNAME_MACHINE="alphaev79" ;;
+ esac
+ # A Pn.n version is a patched version.
+ # A Vn.n version is a released version.
+ # A Tn.n version is a released field test version.
+ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ exit ;;
+ Alpha\ *:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # Should we change UNAME_MACHINE based on the output of uname instead
+ # of the specific Alpha model?
+ echo alpha-pc-interix
+ exit ;;
+ 21064:Windows_NT:50:3)
+ echo alpha-dec-winnt3.5
+ exit ;;
+ Amiga*:UNIX_System_V:4.0:*)
+ echo m68k-unknown-sysv4
+ exit ;;
+ *:[Aa]miga[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-amigaos
+ exit ;;
+ *:[Mm]orph[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-morphos
+ exit ;;
+ *:OS/390:*:*)
+ echo i370-ibm-openedition
+ exit ;;
+ *:z/VM:*:*)
+ echo s390-ibm-zvmoe
+ exit ;;
+ *:OS400:*:*)
+ echo powerpc-ibm-os400
+ exit ;;
+ arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+ echo arm-acorn-riscix${UNAME_RELEASE}
+ exit ;;
+ arm:riscos:*:*|arm:RISCOS:*:*)
+ echo arm-unknown-riscos
+ exit ;;
+ SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+ echo hppa1.1-hitachi-hiuxmpp
+ exit ;;
+ Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+ # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+ if test "`(/bin/universe) 2>/dev/null`" = att ; then
+ echo pyramid-pyramid-sysv3
+ else
+ echo pyramid-pyramid-bsd
+ fi
+ exit ;;
+ NILE*:*:*:dcosx)
+ echo pyramid-pyramid-svr4
+ exit ;;
+ DRS?6000:unix:4.0:6*)
+ echo sparc-icl-nx6
+ exit ;;
+ DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
+ case `/usr/bin/uname -p` in
+ sparc) echo sparc-icl-nx7; exit ;;
+ esac ;;
+ sun4H:SunOS:5.*:*)
+ echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+ echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ i86pc:SunOS:5.*:*)
+ echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:6*:*)
+ # According to config.sub, this is the proper way to canonicalize
+ # SunOS6. Hard to guess exactly what SunOS6 will be like, but
+ # it's likely to be more like Solaris than SunOS4.
+ echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:*:*)
+ case "`/usr/bin/arch -k`" in
+ Series*|S4*)
+ UNAME_RELEASE=`uname -v`
+ ;;
+ esac
+ # Japanese Language versions have a version number like `4.1.3-JL'.
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+ exit ;;
+ sun3*:SunOS:*:*)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ exit ;;
+ sun*:*:4.2BSD:*)
+ UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+ test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+ case "`/bin/arch`" in
+ sun3)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ ;;
+ sun4)
+ echo sparc-sun-sunos${UNAME_RELEASE}
+ ;;
+ esac
+ exit ;;
+ aushp:SunOS:*:*)
+ echo sparc-auspex-sunos${UNAME_RELEASE}
+ exit ;;
+ # The situation for MiNT is a little confusing. The machine name
+ # can be virtually everything (everything which is not
+ # "atarist" or "atariste" at least should have a processor
+ # > m68000). The system name ranges from "MiNT" over "FreeMiNT"
+ # to the lowercase version "mint" (or "freemint"). Finally
+ # the system name "TOS" denotes a system which is actually not
+ # MiNT. But MiNT is downward compatible to TOS, so this should
+ # be no problem.
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+ echo m68k-milan-mint${UNAME_RELEASE}
+ exit ;;
+ hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+ echo m68k-hades-mint${UNAME_RELEASE}
+ exit ;;
+ *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+ echo m68k-unknown-mint${UNAME_RELEASE}
+ exit ;;
+ m68k:machten:*:*)
+ echo m68k-apple-machten${UNAME_RELEASE}
+ exit ;;
+ powerpc:machten:*:*)
+ echo powerpc-apple-machten${UNAME_RELEASE}
+ exit ;;
+ RISC*:Mach:*:*)
+ echo mips-dec-mach_bsd4.3
+ exit ;;
+ RISC*:ULTRIX:*:*)
+ echo mips-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ VAX*:ULTRIX*:*:*)
+ echo vax-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ 2020:CLIX:*:* | 2430:CLIX:*:*)
+ echo clipper-intergraph-clix${UNAME_RELEASE}
+ exit ;;
+ mips:*:*:UMIPS | mips:*:*:RISCos)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+#ifdef __cplusplus
+#include /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+ #if defined (host_mips) && defined (MIPSEB)
+ #if defined (SYSTYPE_SYSV)
+ printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_SVR4)
+ printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+ printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+ #endif
+ #endif
+ exit (-1);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c &&
+ dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+ SYSTEM_NAME=`$dummy $dummyarg` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo mips-mips-riscos${UNAME_RELEASE}
+ exit ;;
+ Motorola:PowerMAX_OS:*:*)
+ echo powerpc-motorola-powermax
+ exit ;;
+ Motorola:*:4.3:PL8-*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:Power_UNIX:*:*)
+ echo powerpc-harris-powerunix
+ exit ;;
+ m88k:CX/UX:7*:*)
+ echo m88k-harris-cxux7
+ exit ;;
+ m88k:*:4*:R4*)
+ echo m88k-motorola-sysv4
+ exit ;;
+ m88k:*:3*:R3*)
+ echo m88k-motorola-sysv3
+ exit ;;
+ AViiON:dgux:*:*)
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+ then
+ if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+ [ ${TARGET_BINARY_INTERFACE}x = x ]
+ then
+ echo m88k-dg-dgux${UNAME_RELEASE}
+ else
+ echo m88k-dg-dguxbcs${UNAME_RELEASE}
+ fi
+ else
+ echo i586-dg-dgux${UNAME_RELEASE}
+ fi
+ exit ;;
+ M88*:DolphinOS:*:*) # DolphinOS (SVR3)
+ echo m88k-dolphin-sysv3
+ exit ;;
+ M88*:*:R3*:*)
+ # Delta 88k system running SVR3
+ echo m88k-motorola-sysv3
+ exit ;;
+ XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+ echo m88k-tektronix-sysv3
+ exit ;;
+ Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+ echo m68k-tektronix-bsd
+ exit ;;
+ *:IRIX*:*:*)
+ echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+ exit ;;
+ ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+ i*86:AIX:*:*)
+ echo i386-ibm-aix
+ exit ;;
+ ia64:AIX:*:*)
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+ then
+ echo "$SYSTEM_NAME"
+ else
+ echo rs6000-ibm-aix3.2.5
+ fi
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit ;;
+ *:AIX:*:[45])
+ IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+ if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit ;;
+ ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+ echo romp-ibm-bsd4.4
+ exit ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
+ exit ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit ;;
+ 9000/[34678]??:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ case "${UNAME_MACHINE}" in
+ 9000/31? ) HP_ARCH=m68000 ;;
+ 9000/[34]?? ) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ if [ -x /usr/bin/getconf ]; then
+ sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+ sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+ case "${sc_cpu_version}" in
+ 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "${sc_kernel_bits}" in
+ 32) HP_ARCH="hppa2.0n" ;;
+ 64) HP_ARCH="hppa2.0w" ;;
+ '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
+ esac ;;
+ esac
+ fi
+ if [ "${HP_ARCH}" = "" ]; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+
+ #define _HPUX_SOURCE
+ #include
+ #include
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+ test -z "$HP_ARCH" && HP_ARCH=hppa
+ fi ;;
+ esac
+ if [ ${HP_ARCH} = "hppa2.0w" ]
+ then
+ eval $set_cc_for_build
+
+ # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+ # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
+ # generating 64-bit code. GNU and HP use different nomenclature:
+ #
+ # $ CC_FOR_BUILD=cc ./config.guess
+ # => hppa2.0w-hp-hpux11.23
+ # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+ # => hppa64-hp-hpux11.23
+
+ if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+ grep __LP64__ >/dev/null
+ then
+ HP_ARCH="hppa2.0w"
+ else
+ HP_ARCH="hppa64"
+ fi
+ fi
+ echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+ exit ;;
+ ia64:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ echo ia64-hp-hpux${HPUX_REV}
+ exit ;;
+ 3050*:HI-UX:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however. */
+ if (CPU_IS_PA_RISC (cpu))
+ {
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+ default: puts ("hppa-hitachi-hiuxwe2"); break;
+ }
+ }
+ else if (CPU_IS_HP_MC68K (cpu))
+ puts ("m68k-hitachi-hiuxwe2");
+ else puts ("unknown-hitachi-hiuxwe2");
+ exit (0);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo unknown-hitachi-hiuxwe2
+ exit ;;
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+ echo hppa1.1-hp-bsd
+ exit ;;
+ 9000/8??:4.3bsd:*:*)
+ echo hppa1.0-hp-bsd
+ exit ;;
+ *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+ echo hppa1.0-hp-mpeix
+ exit ;;
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+ echo hppa1.1-hp-osf
+ exit ;;
+ hp8??:OSF1:*:*)
+ echo hppa1.0-hp-osf
+ exit ;;
+ i*86:OSF1:*:*)
+ if [ -x /usr/sbin/sysversion ] ; then
+ echo ${UNAME_MACHINE}-unknown-osf1mk
+ else
+ echo ${UNAME_MACHINE}-unknown-osf1
+ fi
+ exit ;;
+ parisc*:Lites*:*:*)
+ echo hppa1.1-hp-lites
+ exit ;;
+ C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+ echo c1-convex-bsd
+ exit ;;
+ C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+ echo c34-convex-bsd
+ exit ;;
+ C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+ echo c38-convex-bsd
+ exit ;;
+ C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+ echo c4-convex-bsd
+ exit ;;
+ CRAY*Y-MP:*:*:*)
+ echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*[A-Z]90:*:*:*)
+ echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+ -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*TS:*:*:*)
+ echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*T3E:*:*:*)
+ echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*SV1:*:*:*)
+ echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ *:UNICOS/mp:*:*)
+ echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+ FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ 5000:UNIX_System_V:4.*:*)
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+ echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+ echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+ exit ;;
+ sparc*:BSD/OS:*:*)
+ echo sparc-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:BSD/OS:*:*)
+ echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:FreeBSD:*:*)
+ case ${UNAME_MACHINE} in
+ pc98)
+ echo i386-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ amd64)
+ echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ *)
+ echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ esac
+ exit ;;
+ i*:CYGWIN*:*)
+ echo ${UNAME_MACHINE}-pc-cygwin
+ exit ;;
+ i*:MINGW*:*)
+ echo ${UNAME_MACHINE}-pc-mingw32
+ exit ;;
+ i*:windows32*:*)
+ # uname -m includes "-pc" on this system.
+ echo ${UNAME_MACHINE}-mingw32
+ exit ;;
+ i*:PW*:*)
+ echo ${UNAME_MACHINE}-pc-pw32
+ exit ;;
+ x86:Interix*:[3456]*)
+ echo i586-pc-interix${UNAME_RELEASE}
+ exit ;;
+ EM64T:Interix*:[3456]* | authenticamd:Interix*:[3456]*)
+ echo x86_64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
+ echo i${UNAME_MACHINE}-pc-mks
+ exit ;;
+ i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
+ # UNAME_MACHINE based on the output of uname instead of i386?
+ echo i586-pc-interix
+ exit ;;
+ i*:UWIN*:*)
+ echo ${UNAME_MACHINE}-pc-uwin
+ exit ;;
+ amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+ echo x86_64-unknown-cygwin
+ exit ;;
+ p*:CYGWIN*:*)
+ echo powerpcle-unknown-cygwin
+ exit ;;
+ prep*:SunOS:5.*:*)
+ echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ *:GNU:*:*)
+ # the GNU system
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit ;;
+ *:GNU/*:*:*)
+ # other systems with GNU libc and userland
+ echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
+ exit ;;
+ i*86:Minix:*:*)
+ echo ${UNAME_MACHINE}-pc-minix
+ exit ;;
+ arm*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ avr32*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ cris:Linux:*:*)
+ echo cris-axis-linux-gnu
+ exit ;;
+ crisv32:Linux:*:*)
+ echo crisv32-axis-linux-gnu
+ exit ;;
+ frv:Linux:*:*)
+ echo frv-unknown-linux-gnu
+ exit ;;
+ ia64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ m32r*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ m68*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ mips:Linux:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #undef CPU
+ #undef mips
+ #undef mipsel
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ CPU=mipsel
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ CPU=mips
+ #else
+ CPU=
+ #endif
+ #endif
+EOF
+ eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
+ /^CPU/{
+ s: ::g
+ p
+ }'`"
+ test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
+ ;;
+ mips64:Linux:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #undef CPU
+ #undef mips64
+ #undef mips64el
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ CPU=mips64el
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ CPU=mips64
+ #else
+ CPU=
+ #endif
+ #endif
+EOF
+ eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
+ /^CPU/{
+ s: ::g
+ p
+ }'`"
+ test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
+ ;;
+ or32:Linux:*:*)
+ echo or32-unknown-linux-gnu
+ exit ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-gnu
+ exit ;;
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-gnu
+ exit ;;
+ alpha:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null
+ if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
+ exit ;;
+ parisc:Linux:*:* | hppa:Linux:*:*)
+ # Look for CPU level
+ case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+ PA7*) echo hppa1.1-unknown-linux-gnu ;;
+ PA8*) echo hppa2.0-unknown-linux-gnu ;;
+ *) echo hppa-unknown-linux-gnu ;;
+ esac
+ exit ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-gnu
+ exit ;;
+ s390:Linux:*:* | s390x:Linux:*:*)
+ echo ${UNAME_MACHINE}-ibm-linux
+ exit ;;
+ sh64*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ sh*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ vax:Linux:*:*)
+ echo ${UNAME_MACHINE}-dec-linux-gnu
+ exit ;;
+ x86_64:Linux:*:*)
+ echo x86_64-unknown-linux-gnu
+ exit ;;
+ i*86:Linux:*:*)
+ # The BFD linker knows what the default object file format is, so
+ # first see if it will tell us. cd to the root directory to prevent
+ # problems with other programs or directories called `ld' in the path.
+ # Set LC_ALL=C to ensure ld outputs messages in English.
+ ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \
+ | sed -ne '/supported targets:/!d
+ s/[ ][ ]*/ /g
+ s/.*supported targets: *//
+ s/ .*//
+ p'`
+ case "$ld_supported_targets" in
+ elf32-i386)
+ TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
+ ;;
+ a.out-i386-linux)
+ echo "${UNAME_MACHINE}-pc-linux-gnuaout"
+ exit ;;
+ coff-i386)
+ echo "${UNAME_MACHINE}-pc-linux-gnucoff"
+ exit ;;
+ "")
+ # Either a pre-BFD a.out linker (linux-gnuoldld) or
+ # one that does not give us useful --help.
+ echo "${UNAME_MACHINE}-pc-linux-gnuoldld"
+ exit ;;
+ esac
+ # Determine whether the default compiler is a.out or elf
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include
+ #ifdef __ELF__
+ # ifdef __GLIBC__
+ # if __GLIBC__ >= 2
+ LIBC=gnu
+ # else
+ LIBC=gnulibc1
+ # endif
+ # else
+ LIBC=gnulibc1
+ # endif
+ #else
+ #if defined(__INTEL_COMPILER) || defined(__PGI) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+ LIBC=gnu
+ #else
+ LIBC=gnuaout
+ #endif
+ #endif
+ #ifdef __dietlibc__
+ LIBC=dietlibc
+ #endif
+EOF
+ eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
+ /^LIBC/{
+ s: ::g
+ p
+ }'`"
+ test x"${LIBC}" != x && {
+ echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
+ exit
+ }
+ test x"${TENTATIVE}" != x && { echo "${TENTATIVE}"; exit; }
+ ;;
+ i*86:DYNIX/ptx:4*:*)
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+ # earlier versions are messed up and put the nodename in both
+ # sysname and nodename.
+ echo i386-sequent-sysv4
+ exit ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+ exit ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo ${UNAME_MACHINE}-pc-os2-emx
+ exit ;;
+ i*86:XTS-300:*:STOP)
+ echo ${UNAME_MACHINE}-unknown-stop
+ exit ;;
+ i*86:atheos:*:*)
+ echo ${UNAME_MACHINE}-unknown-atheos
+ exit ;;
+ i*86:syllable:*:*)
+ echo ${UNAME_MACHINE}-pc-syllable
+ exit ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
+ echo i386-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ i*86:*DOS:*:*)
+ echo ${UNAME_MACHINE}-pc-msdosdjgpp
+ exit ;;
+ i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+ UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+ else
+ echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+ fi
+ exit ;;
+ i*86:*:5:[678]*)
+ # UnixWare 7.x, OpenUNIX and OpenServer 6.
+ case `/bin/uname -X | grep "^Machine"` in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+ exit ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then
+ UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+ else
+ echo ${UNAME_MACHINE}-pc-sysv32
+ fi
+ exit ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+ # uname -m prints for DJGPP always 'pc', but it prints nothing about
+ # the processor, so we play safe by assuming i386.
+ echo i386-pc-msdosdjgpp
+ exit ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
+ fi
+ exit ;;
+ mini*:CTIX:SYS*5:*)
+ # "miniframe"
+ echo m68010-convergent-sysv
+ exit ;;
+ mc68k:UNIX:SYSTEM5:3.51m)
+ echo m68k-convergent-sysv
+ exit ;;
+ M680?0:D-NIX:5.3:*)
+ echo m68k-diab-dnix
+ exit ;;
+ M68*:*:R3V[5678]*:*)
+ test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+ 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
+ OS_REL=''
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4; exit; } ;;
+ m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+ echo m68k-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ mc68030:UNIX_System_V:4.*:*)
+ echo m68k-atari-sysv4
+ exit ;;
+ TSUNAMI:LynxOS:2.*:*)
+ echo sparc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ rs6000:LynxOS:2.*:*)
+ echo rs6000-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*)
+ echo powerpc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ SM[BE]S:UNIX_SV:*:*)
+ echo mips-dde-sysv${UNAME_RELEASE}
+ exit ;;
+ RM*:ReliantUNIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ RM*:SINIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ *:SINIX-*:*:*)
+ if uname -p 2>/dev/null >/dev/null ; then
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ echo ${UNAME_MACHINE}-sni-sysv4
+ else
+ echo ns32k-sni-sysv
+ fi
+ exit ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says
+ echo i586-unisys-sysv4
+ exit ;;
+ *:UNIX_System_V:4*:FTX*)
+ # From Gerald Hewes .
+ # How about differentiating between stratus architectures? -djm
+ echo hppa1.1-stratus-sysv4
+ exit ;;
+ *:*:*:FTX*)
+ # From seanf@swdc.stratus.com.
+ echo i860-stratus-sysv4
+ exit ;;
+ i*86:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo ${UNAME_MACHINE}-stratus-vos
+ exit ;;
+ *:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo hppa1.1-stratus-vos
+ exit ;;
+ mc68*:A/UX:*:*)
+ echo m68k-apple-aux${UNAME_RELEASE}
+ exit ;;
+ news*:NEWS-OS:6*:*)
+ echo mips-sony-newsos6
+ exit ;;
+ R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+ if [ -d /usr/nec ]; then
+ echo mips-nec-sysv${UNAME_RELEASE}
+ else
+ echo mips-unknown-sysv${UNAME_RELEASE}
+ fi
+ exit ;;
+ BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
+ echo powerpc-be-beos
+ exit ;;
+ BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
+ echo powerpc-apple-beos
+ exit ;;
+ BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
+ echo i586-pc-beos
+ exit ;;
+ SX-4:SUPER-UX:*:*)
+ echo sx4-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-5:SUPER-UX:*:*)
+ echo sx5-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-6:SUPER-UX:*:*)
+ echo sx6-nec-superux${UNAME_RELEASE}
+ exit ;;
+ Power*:Rhapsody:*:*)
+ echo powerpc-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Rhapsody:*:*)
+ echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Darwin:*:*)
+ UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+ case $UNAME_PROCESSOR in
+ unknown) UNAME_PROCESSOR=powerpc ;;
+ esac
+ echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+ exit ;;
+ *:procnto*:*:* | *:QNX:[0123456789]*:*)
+ UNAME_PROCESSOR=`uname -p`
+ if test "$UNAME_PROCESSOR" = "x86"; then
+ UNAME_PROCESSOR=i386
+ UNAME_MACHINE=pc
+ fi
+ echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+ exit ;;
+ *:QNX:*:4*)
+ echo i386-pc-qnx
+ exit ;;
+ NSE-?:NONSTOP_KERNEL:*:*)
+ echo nse-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSR-?:NONSTOP_KERNEL:*:*)
+ echo nsr-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ *:NonStop-UX:*:*)
+ echo mips-compaq-nonstopux
+ exit ;;
+ BS2000:POSIX*:*:*)
+ echo bs2000-siemens-sysv
+ exit ;;
+ DS/*:UNIX_System_V:*:*)
+ echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+ exit ;;
+ *:Plan9:*:*)
+ # "uname -m" is not consistent, so use $cputype instead. 386
+ # is converted to i386 for consistency with other x86
+ # operating systems.
+ if test "$cputype" = "386"; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo ${UNAME_MACHINE}-unknown-plan9
+ exit ;;
+ *:TOPS-10:*:*)
+ echo pdp10-unknown-tops10
+ exit ;;
+ *:TENEX:*:*)
+ echo pdp10-unknown-tenex
+ exit ;;
+ KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+ echo pdp10-dec-tops20
+ exit ;;
+ XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+ echo pdp10-xkl-tops20
+ exit ;;
+ *:TOPS-20:*:*)
+ echo pdp10-unknown-tops20
+ exit ;;
+ *:ITS:*:*)
+ echo pdp10-unknown-its
+ exit ;;
+ SEI:*:*:SEIUX)
+ echo mips-sei-seiux${UNAME_RELEASE}
+ exit ;;
+ *:DragonFly:*:*)
+ echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+ exit ;;
+ *:*VMS:*:*)
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ case "${UNAME_MACHINE}" in
+ A*) echo alpha-dec-vms ; exit ;;
+ I*) echo ia64-dec-vms ; exit ;;
+ V*) echo vax-dec-vms ; exit ;;
+ esac ;;
+ *:XENIX:*:SysV)
+ echo i386-pc-xenix
+ exit ;;
+ i*86:skyos:*:*)
+ echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+ exit ;;
+ i*86:rdos:*:*)
+ echo ${UNAME_MACHINE}-pc-rdos
+ exit ;;
+esac
+
+#echo '(No uname command or uname output not recognized.)' 1>&2
+#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
+
+eval $set_cc_for_build
+cat >$dummy.c <
+# include
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+ /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
+ I don't know.... */
+ printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include
+ printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+ "4"
+#else
+ ""
+#endif
+ ); exit (0);
+#endif
+#endif
+
+#if defined (__arm) && defined (__acorn) && defined (__unix)
+ printf ("arm-acorn-riscix\n"); exit (0);
+#endif
+
+#if defined (hp300) && !defined (hpux)
+ printf ("m68k-hp-bsd\n"); exit (0);
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+ int version;
+ version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
+ if (version < 4)
+ printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+ else
+ printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+ exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+ printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+ printf ("ns32k-encore-mach\n"); exit (0);
+#else
+ printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+ printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+ printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+ printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+ struct utsname un;
+
+ uname(&un);
+
+ if (strncmp(un.version, "V2", 2) == 0) {
+ printf ("i386-sequent-ptx2\n"); exit (0);
+ }
+ if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
+ printf ("i386-sequent-ptx1\n"); exit (0);
+ }
+ printf ("i386-sequent-ptx\n"); exit (0);
+
+#endif
+
+#if defined (vax)
+# if !defined (ultrix)
+# include
+# if defined (BSD)
+# if BSD == 43
+ printf ("vax-dec-bsd4.3\n"); exit (0);
+# else
+# if BSD == 199006
+ printf ("vax-dec-bsd4.3reno\n"); exit (0);
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# endif
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# else
+ printf ("vax-dec-ultrix\n"); exit (0);
+# endif
+#endif
+
+#if defined (alliant) && defined (i860)
+ printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+ exit (1);
+}
+EOF
+
+$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+
+# Apollos put the system type in the environment.
+
+test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
+
+# Convex versions that predate uname can use getsysinfo(1)
+
+if [ -x /usr/convex/getsysinfo ]
+then
+ case `getsysinfo -f cpu_type` in
+ c1*)
+ echo c1-convex-bsd
+ exit ;;
+ c2*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ c34*)
+ echo c34-convex-bsd
+ exit ;;
+ c38*)
+ echo c38-convex-bsd
+ exit ;;
+ c4*)
+ echo c4-convex-bsd
+ exit ;;
+ esac
+fi
+
+cat >&2 < in order to provide the needed
+information to handle your system.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo = `(hostinfo) 2>/dev/null`
+/bin/universe = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/externals/nitro/externals/coda-oss/modules/c++/CMakeLists.txt b/externals/nitro/externals/coda-oss/modules/c++/CMakeLists.txt
new file mode 100644
index 000000000..edf1dd33f
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/CMakeLists.txt
@@ -0,0 +1,64 @@
+set(TARGET_LANGUAGE c++)
+
+# turn on maximum warnings
+if (MSVC)
+ # By default, there is a /W3 on the command-line from somewhere (?); adding
+ # /Wn results in a compiler warning.
+ #add_compile_options(/W4) # /Wall
+ string(REGEX REPLACE "/W[0-4]" "/W4" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") # /Wall
+
+elseif (UNIX)
+ add_compile_options(-Wall -pedantic -Wextra)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wregister") # -Wvolatile
+ #set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Weffc++")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wzero-as-null-pointer-constant")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Woverloaded-virtual")
+ #set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wold-style-cast")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wsuggest-final-types -Wsuggest-final-methods")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wsuggest-override")
+endif()
+
+# add an interface library for unittests
+add_library(TestCase INTERFACE)
+target_link_libraries(TestCase INTERFACE sys-c++ str-c++)
+target_include_directories(TestCase INTERFACE
+ "$"
+ "$")
+install(FILES "include/TestCase.h" DESTINATION "${CODA_STD_PROJECT_INCLUDE_DIR}")
+install(TARGETS TestCase EXPORT ${CODA_EXPORT_SET_NAME})
+
+# common configuration checks, used by config and sys modules
+test_big_endian(BIGENDIAN)
+
+add_subdirectory("config")
+add_subdirectory("coda_oss")
+add_subdirectory("avx")
+add_subdirectory("except")
+add_subdirectory("sys")
+add_subdirectory("str")
+add_subdirectory("mem")
+add_subdirectory("re")
+add_subdirectory("io")
+add_subdirectory("zip")
+add_subdirectory("sio.lite")
+add_subdirectory("cli")
+add_subdirectory("dbi")
+add_subdirectory("types")
+add_subdirectory("unique")
+add_subdirectory("units")
+add_subdirectory("math")
+add_subdirectory("mt")
+add_subdirectory("logging")
+add_subdirectory("xml.lite")
+add_subdirectory("net") # must be after "re"
+add_subdirectory("net.ssl") # must be after "net"
+add_subdirectory("plugin")
+add_subdirectory("tiff")
+add_subdirectory("polygon")
+add_subdirectory("math.linear")
+add_subdirectory("math.poly")
+add_subdirectory("numpyutils")
+add_subdirectory("hdf5.lite")
+add_subdirectory("gsl")
+add_subdirectory("std")
+
diff --git a/externals/nitro/externals/coda-oss/modules/c++/coda-oss-lite.vcxproj b/externals/nitro/externals/coda-oss/modules/c++/coda-oss-lite.vcxproj
new file mode 100644
index 000000000..085528d67
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/coda-oss-lite.vcxproj
@@ -0,0 +1,503 @@
+
+
+
+
+ Debug
+ x64
+
+
+ Release
+ x64
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Create
+ Create
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 16.0
+ Win32Proj
+ {9997e895-5161-4ddf-8f3f-099894cb2f21}
+ codaoss
+ 10.0
+ coda-oss-lite
+
+
+
+ DynamicLibrary
+ true
+ v143
+
+
+ DynamicLibrary
+ false
+ v143
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ true
+ true
+ AllRules.ruleset
+
+
+ false
+
+
+
+ EnableAllWarnings
+ true
+ _DEBUG;_LIB;%(PreprocessorDefinitions);CODA_OSS_EXPORTS;CODA_OSS_DLL;MT_DEFAULT_PINNING=0;RE_ENABLE_STD_REGEX=1
+ pch.h
+ cli\include\;coda_oss\include;config\include\;except\include\;gsl\include\;io\include\;logging\include\;math\include\;math.linear\include\;math.poly\include\;mem\include\;mt\include\;plugin\include\;polygon\include\;re\include\;sio.lite\include\;std\include\;str\include\;sys\include\;tiff\include;types\include\;units\include\
+ Use
+ pch.h
+ true
+ Guard
+ ProgramDatabase
+ true
+ stdcpp20
+
+
+
+
+ true
+
+
+
+
+ Level3
+ true
+ true
+ true
+ NDEBUG;_LIB;%(PreprocessorDefinitions);CODA_OSS_EXPORTS;CODA_OSS_DLL;MT_DEFAULT_PINNING=0;RE_ENABLE_STD_REGEX=1
+ pch.h
+ cli\include\;coda_oss\include;config\include\;except\include\;gsl\include\;io\include\;logging\include\;math\include\;math.linear\include\;math.poly\include\;mem\include\;mt\include\;plugin\include\;polygon\include\;re\include\;sio.lite\include\;std\include\;str\include\;sys\include\;tiff\include;types\include\;units\include\
+ Use
+ pch.h
+ true
+ Guard
+ true
+ stdcpp20
+
+
+
+
+ true
+ true
+ true
+
+
+
+
+
+
\ No newline at end of file
diff --git a/externals/nitro/externals/coda-oss/modules/c++/coda-oss-lite.vcxproj.filters b/externals/nitro/externals/coda-oss/modules/c++/coda-oss-lite.vcxproj.filters
new file mode 100644
index 000000000..7f3bbc4f5
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/coda-oss-lite.vcxproj.filters
@@ -0,0 +1,1225 @@
+
+
+
+
+
+
+ config
+
+
+ config
+
+
+ gsl
+
+
+ gsl
+
+
+ gsl
+
+
+ gsl
+
+
+ coda_oss
+
+
+ coda_oss
+
+
+ coda_oss
+
+
+ coda_oss
+
+
+ coda_oss
+
+
+ coda_oss
+
+
+ coda_oss
+
+
+ coda_oss
+
+
+ except
+
+
+ except
+
+
+ except
+
+
+ except
+
+
+ except
+
+
+ except
+
+
+ except
+
+
+ str
+
+
+ str
+
+
+ str
+
+
+ str
+
+
+ str
+
+
+ str
+
+
+ str
+
+
+ str
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ types
+
+
+ types
+
+
+ types
+
+
+ types
+
+
+ types
+
+
+ mem
+
+
+ mem
+
+
+ mem
+
+
+ mem
+
+
+ mem
+
+
+ mem
+
+
+ mem
+
+
+ mem
+
+
+ mem
+
+
+ mem
+
+
+ mem
+
+
+ mem
+
+
+ mem
+
+
+ mem
+
+
+ math
+
+
+ math
+
+
+ math
+
+
+ math
+
+
+ math
+
+
+ units
+
+
+ units
+
+
+ units
+
+
+ math.linear
+
+
+ math.linear
+
+
+ math.linear
+
+
+ math.linear
+
+
+ math.linear
+
+
+ math.linear
+
+
+ math.poly
+
+
+ math.poly
+
+
+ math.poly
+
+
+ math.poly
+
+
+ math.poly
+
+
+ math.poly
+
+
+ math.poly
+
+
+ math.poly
+
+
+ polygon
+
+
+ polygon
+
+
+ polygon
+
+
+ config
+
+
+ include
+
+
+ cli
+
+
+ cli
+
+
+ cli
+
+
+ cli
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ avx
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ re
+
+
+ re
+
+
+ re
+
+
+ sio.lite
+
+
+ sio.lite
+
+
+ sio.lite
+
+
+ sio.lite
+
+
+ sio.lite
+
+
+ sio.lite
+
+
+ sio.lite
+
+
+ sio.lite
+
+
+ sio.lite
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ sio.lite
+
+
+ sio.lite
+
+
+ plugin
+
+
+ plugin
+
+
+ plugin
+
+
+ sys
+
+
+ mem
+
+
+ mem
+
+
+ std
+
+
+ std
+
+
+ std
+
+
+ config
+
+
+ sys
+
+
+ sys
+
+
+ mt
+
+
+ sys
+
+
+ sys
+
+
+ types
+
+
+
+
+
+
+
+ except
+
+
+ except
+
+
+ except
+
+
+ except
+
+
+ str
+
+
+ str
+
+
+ str
+
+
+ str
+
+
+ str
+
+
+ str
+
+
+ str
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ sys
+
+
+ types
+
+
+ types
+
+
+ mem
+
+
+ mem
+
+
+ math
+
+
+ math
+
+
+ math
+
+
+ math.linear
+
+
+ polygon
+
+
+ cli
+
+
+ cli
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ io
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ mt
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ logging
+
+
+ re
+
+
+ re
+
+
+ sio.lite
+
+
+ sio.lite
+
+
+ sio.lite
+
+
+ sio.lite
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ tiff
+
+
+ plugin
+
+
+ sys
+
+
+ sys
+
+
+
+
+ {89602880-5929-45e9-a603-d13f81972397}
+
+
+ {619ad1da-f21c-4027-9b5d-23f08225b96f}
+
+
+ {59f3d9a1-06d3-4779-aef2-cc55223c3017}
+
+
+ {3051f4b6-dad4-47ea-b4bd-d25d9e09f782}
+
+
+ {44a8dfa7-099c-4dd5-87b6-3b05ce13580b}
+
+
+ {d714ac63-ef19-4629-a13f-59b550604842}
+
+
+ {d8059280-e435-4365-be67-865195b9e813}
+
+
+ {3613caf2-18d1-4af7-bfa0-a3712a44da45}
+
+
+ {0e318d08-1ee3-4644-a299-a81e436c9a32}
+
+
+ {6d8f380f-54e3-4d0d-bd1a-a2edd0efbaa7}
+
+
+ {1bf84676-4c62-4e2b-b943-59bf82f89126}
+
+
+ {b3d7f0e3-2e9f-4a19-b181-27cace7536c0}
+
+
+ {15f9b62f-d17e-4d84-bc34-de6fd5fbcb33}
+
+
+ {f2544ccb-0933-44c7-af39-cd986982af3d}
+
+
+ {9050a469-23a5-4da0-92b1-a07a8e52e9fc}
+
+
+ {de76f131-1cd6-4c3d-aa9c-37ca3b5079f0}
+
+
+ {cc681a99-da96-483e-a92a-eab0ea3a0ec0}
+
+
+ {4875d8c0-f285-460a-98a9-38a60f3fcffd}
+
+
+ {9720bf2c-6d77-4685-9318-d6e714065c99}
+
+
+ {9f9acff4-c149-4908-ae9d-67557974f4ee}
+
+
+ {83ae731c-66f7-468c-bc71-7cb57a363b5b}
+
+
+ {327a7b72-9a2d-48e4-9c23-2b5b9d2f3519}
+
+
+ {d76b6d3f-51f8-4cf8-bc56-35f720080a8b}
+
+
+ {387bc6cb-323a-42b3-8502-4fac72586d12}
+
+
+
+
+ gsl
+
+
+ gsl
+
+
+ gsl
+
+
+ gsl
+
+
+ gsl
+
+
+ gsl
+
+
+ gsl
+
+
+ gsl
+
+
+ gsl
+
+
+ gsl
+
+
+ std
+
+
+ std
+
+
+ std
+
+
+ std
+
+
+ std
+
+
+ std
+
+
+ std
+
+
+ std
+
+
+ sys
+
+
+
\ No newline at end of file
diff --git a/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/CPlusPlus.h b/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/CPlusPlus.h
new file mode 100644
index 000000000..6b6a8a78c
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/CPlusPlus.h
@@ -0,0 +1,77 @@
+/* =========================================================================
+ * This file is part of coda_oss-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2021, 2022, Maxar Technologies, Inc.
+ *
+ * coda_oss-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef CODA_OSS_coda_oss_CPlusPlus_h_INCLUDED_
+#define CODA_OSS_coda_oss_CPlusPlus_h_INCLUDED_
+#pragma once
+
+#ifdef CODA_OSS_cplusplus
+ #error "CODA_OSS_cplusplus already #define'd."
+#endif
+
+#ifndef __cplusplus
+ #error "Only C++ compilation is supported."
+#endif
+#define CODA_OSS_cplusplus __cplusplus
+
+#if CODA_OSS_cplusplus < 201103L // We need at least C++11
+ #undef CODA_OSS_cplusplus // oops...try to fix
+
+ // MSVC only sets __cplusplus >199711L with the /Zc:__cplusplus command-line option.
+ // https://devblogs.microsoft.com/cppblog/msvc-now-correctly-reports-__cplusplus/
+ #if defined(_MSVC_LANG)
+ // https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros?view=msvc-160
+ // "Defined as an integer literal that specifies the C++ language standard targeted by the compiler."
+ #define CODA_OSS_cplusplus _MSVC_LANG
+ #elif defined(_MSC_VER)
+ #error "_MSVC_LANG should be #define'd."
+ #endif // _MSVC_LANG
+
+ #if defined(__GNUC__)
+ #endif // __GNUC__
+
+ #if defined(__INTEL_COMPILER)
+ #endif // __INTEL_COMPILER
+#endif // CODA_OSS_cplusplus
+
+#if CODA_OSS_cplusplus < 202002L
+ // oops ... try to fix
+ #if defined(__GNUC__) && (__cplusplus >= 201709L) // note > C++ 17 of 201703L
+ // Enough C++20 for our needs
+ #undef CODA_OSS_cplusplus
+ #define CODA_OSS_cplusplus 202002L
+ #endif
+#endif // CODA_OSS_cplusplus
+
+// Define a few macros as that's less verbose than testing against a version number
+// https://en.cppreference.com/w/cpp/preprocessor/replace#Predefined_macros
+#define CODA_OSS_cpp11 (CODA_OSS_cplusplus >= 201103L)
+#define CODA_OSS_cpp14 (CODA_OSS_cplusplus >= 201402L)
+#define CODA_OSS_cpp17 (CODA_OSS_cplusplus >= 201703L)
+#define CODA_OSS_cpp20 (CODA_OSS_cplusplus >= 202002L)
+#define CODA_OSS_cpp23 (CODA_OSS_cplusplus >= 202302L)
+
+#if !CODA_OSS_cpp20
+#error "Must compile with C++20 or greater."
+#endif
+
+#endif // CODA_OSS_coda_oss_CPlusPlus_h_INCLUDED_
diff --git a/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/bit.h b/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/bit.h
new file mode 100644
index 000000000..2f9f1083a
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/bit.h
@@ -0,0 +1,92 @@
+/* =========================================================================
+ * This file is part of coda_oss-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2020-2022, Maxar Technologies, Inc.
+ *
+ * coda_oss-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not, http://www.gnu.org/licenses/.
+ *
+ */
+#pragma once
+#ifndef CODA_OSS_coda_oss_bit_h_INCLUDED_
+#define CODA_OSS_coda_oss_bit_h_INCLUDED_
+
+#include <cstdint>
+#include <cstdlib>
+#ifdef __GNUC__
+#include <byteswap.h> // "These functions are GNU extensions."
+#endif
+
+#include <bit>
+#include <type_traits>
+
+#include "coda_oss/namespace_.h"
+namespace coda_oss
+{
+ using std::endian;
+
+ // https://en.cppreference.com/w/cpp/numeric/byteswap
+ namespace details
+ {
+ // Overloads for common types
+ inline constexpr uint8_t byteswap(uint8_t val) noexcept
+ {
+ return val; // no-op
+ }
+ #if defined(_MSC_VER)
+ // These routines should generate a single instruction; see
+ // https://devblogs.microsoft.com/cppblog/a-tour-of-4-msvc-backend-improvements/
+ inline uint16_t byteswap(uint16_t val) noexcept
+ {
+ return _byteswap_ushort(val);
+ }
+ inline uint32_t byteswap(uint32_t val) noexcept
+ {
+ return _byteswap_ulong(val);
+ }
+ inline uint64_t byteswap(uint64_t val) noexcept
+ {
+ return _byteswap_uint64(val);
+ }
+ #elif defined(__GNUC__)
+ inline uint16_t byteswap(uint16_t val) noexcept
+ {
+ return bswap_16(val);
+ }
+ inline uint32_t byteswap(uint32_t val) noexcept
+ {
+ return bswap_32(val);
+ }
+ inline uint64_t byteswap(uint64_t val) noexcept
+ {
+ return bswap_64(val);
+ }
+ #else
+ #error "No platform-specific byteswap()" // TODO: do something else?
+ #endif
+ }
+
+ template <typename T>
+ inline T byteswap(T n) noexcept
+ {
+ // "std::byteswap participates in overload resolution only if T satisfies integral, i.e., T is an integer type. The program is
+ // ill-formed if T has padding bits."
+ static_assert(std::is_integral<T>::value, "T must be integral");
+
+ using unsigned_t = std::make_unsigned_t<T>; // "Since C++14" https://en.cppreference.com/w/cpp/types/make_unsigned
+ return details::byteswap(static_cast<unsigned_t>(n));
+ }
+}
+
+#endif // CODA_OSS_coda_oss_bit_h_INCLUDED_
diff --git a/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/cstddef.h b/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/cstddef.h
new file mode 100644
index 000000000..02b5d720b
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/cstddef.h
@@ -0,0 +1,32 @@
+/* =========================================================================
+ * This file is part of coda_oss-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2020, Maxar Technologies, Inc.
+ *
+ * coda_oss-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not, http://www.gnu.org/licenses/.
+ *
+ */
+#pragma once
+#ifndef CODA_OSS_coda_oss_cstddef_h_INCLUDED_
+#define CODA_OSS_coda_oss_cstddef_h_INCLUDED_
+
+#include <cstddef>
+
+namespace coda_oss
+{
+ using std::byte;
+}
+
+#endif // CODA_OSS_coda_oss_cstddef_h_INCLUDED_
diff --git a/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/namespace_.h b/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/namespace_.h
new file mode 100644
index 000000000..5e054a3f4
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/namespace_.h
@@ -0,0 +1,29 @@
+/* =========================================================================
+ * This file is part of coda_oss-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2020, Maxar Technologies, Inc.
+ *
+ * coda_oss-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not, http://www.gnu.org/licenses/.
+ *
+ */
+#pragma once
+#ifndef CODA_OSS_coda_oss_namespace__h_INCLUDED_
+#define CODA_OSS_coda_oss_namespace__h_INCLUDED_
+
+namespace coda_oss
+{
+}
+
+#endif // CODA_OSS_coda_oss_namespace__h_INCLUDED_
diff --git a/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/span.h b/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/span.h
new file mode 100644
index 000000000..9ed4c007b
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/span.h
@@ -0,0 +1,37 @@
+/* =========================================================================
+ * This file is part of coda_oss-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2004 - 2014, MDA Information Systems LLC
+ *
+ * coda_oss-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ */
+#pragma once
+#ifndef CODA_OSS_coda_oss_span_h_INCLUDED_
+#define CODA_OSS_coda_oss_span_h_INCLUDED_
+
+#include <span>
+
+namespace coda_oss
+{
+ using std::span;
+
+ // https://en.cppreference.com/w/cpp/container/span/as_bytes
+ using std::as_bytes;
+ using std::as_writable_bytes;
+}
+
+#endif // CODA_OSS_coda_oss_span_h_INCLUDED_
diff --git a/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/string.h b/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/string.h
new file mode 100644
index 000000000..cb0cd4c13
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/string.h
@@ -0,0 +1,35 @@
+/* =========================================================================
+ * This file is part of coda_oss-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2020-2022, Maxar Technologies, Inc.
+ *
+ * coda_oss-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#pragma once
+#ifndef CODA_OSS_coda_oss_string_h_INCLUDED_
+#define CODA_OSS_coda_oss_string_h_INCLUDED_
+
+#include <string>
+
+#include "coda_oss/namespace_.h"
+namespace coda_oss
+{
+ using std::u8string;
+}
+
+#endif // CODA_OSS_coda_oss_string_h_INCLUDED_
diff --git a/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/type_traits.h b/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/type_traits.h
new file mode 100644
index 000000000..f95ff49fa
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/coda_oss/include/coda_oss/type_traits.h
@@ -0,0 +1,33 @@
+/* =========================================================================
+ * This file is part of coda_oss-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2022, Maxar Technologies, Inc.
+ *
+ * coda_oss-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not, http://www.gnu.org/licenses/.
+ *
+ */
+#pragma once
+#ifndef CODA_OSS_coda_oss_type_traits_h_INCLUDED_
+#define CODA_OSS_coda_oss_type_traits_h_INCLUDED_
+
+#include <type_traits>
+
+#include "coda_oss/namespace_.h"
+namespace coda_oss
+{
+using std::is_trivially_copyable;
+}
+
+#endif // CODA_OSS_coda_oss_type_traits_h_INCLUDED_
diff --git a/externals/nitro/externals/coda-oss/modules/c++/mt/tests/ThreadGroupAffinityTest.cpp b/externals/nitro/externals/coda-oss/modules/c++/mt/tests/ThreadGroupAffinityTest.cpp
new file mode 100644
index 000000000..57f3e3f68
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/mt/tests/ThreadGroupAffinityTest.cpp
@@ -0,0 +1,186 @@
+/* =========================================================================
+ * This file is part of mt-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2004 - 2019, MDA Information Systems LLC
+ *
+ * mt-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <iostream>
+
+#if !defined(__APPLE_CC__) && (defined(__linux) || defined(__linux__))
+#include <unistd.h>
+
+#include <import/sys.h>
+#include <import/mt.h>
+#include <import/cli.h>
+#include <atomic>
+#include <memory>
+using namespace sys;
+using namespace mt;
+
+namespace
+{
+class MyRunTask : public Runnable
+{
+public:
+
+ MyRunTask(size_t threadNum, sys::AtomicCounter& counter) :
+ mThread(threadNum),
+ mThreadCounter(counter)
+ {
+ }
+
+ virtual void run() override
+ {
+ // Print diagnostics from inside the thread
+ while(true)
+ {
+ if (mThread == mThreadCounter.get())
+ {
+ mt::CriticalSection obtainLock(&mMutex);
+ std::cout << "Thread " << mThread
+ << " " << sys::ScopedCPUAffinityUnix().toString()
+ << "\n";
+ mThreadCounter.increment();
+ break;
+ }
+ }
+
+ // Do work, with periodic breaks to give the OS
+ // the opportunity to change which CPU the thread is
+ // assigned to
+ for (size_t trials = 0; trials < 10; ++trials)
+ {
+ constexpr size_t count = 10000000000;
+ std::atomic<size_t> sum{0};
+ for (size_t ii = 0; ii < count; ++ii)
+ {
+ sum++;
+ }
+ sleep(5);
+ }
+ }
+
+private:
+ sys::AtomicCounter::ValueType mThread;
+ sys::Mutex mMutex;
+ sys::AtomicCounter& mThreadCounter;
+};
+}
+
+int main(int argc, char** argv)
+{
+ // This program simulates a workload with periodic breaks
+ // in an attempt to illustrate pinning threads spawned
+ // by a ThreadGroup to unique CPUs.
+ //
+ // It is recommended that one runs the following (or equivalent)
+ // to view per-process utilization:
+ // mpstat -P ALL 2 1000
+ //
+ // Running without the --pin option will not perform any pinning.
+ // Using mpstat, you should be able to see the assignments
+ // drift over time (if numThreads < numCPUsAvailable). If the
+ // machine is fairly quiet, drifting may take some time to manifest.
+ //
+ // With --pin enabled, there should be no drift -- threads will be
+ // stuck on the CPUs that are masked in the per-thread diagnostic
+ // output.
+ try
+ {
+ const size_t numCPUsAvailable = sys::OS().getNumCPUsAvailable();
+
+ //-----------------------------------------------------
+ // Handle CLI parameters
+ //-----------------------------------------------------
+ cli::ArgumentParser parser;
+ parser.addArgument("--threads",
+ "Number of threads to use",
+ cli::STORE,
+ "threads",
+ "INT")->setDefault(numCPUsAvailable);
+
+ parser.addArgument("--pin",
+ "Enable CPU pinning",
+ cli::STORE_TRUE,
+ "pinToCPU")->setDefault(false);
+ const std::unique_ptr<cli::Results> options(parser.parse(argc, argv));
+
+ const bool pinToCPU = options->get<bool>("pinToCPU");
+ const size_t numThreads = options->get<size_t>("threads");
+
+ //-----------------------------------------------------
+ // Print diagnostics
+ //-----------------------------------------------------
+ std::cout << "Num CPUs available: " << numCPUsAvailable << std::endl;
+ std::cout << "Num threads requested: " << numThreads << std::endl;
+ std::cout << "Use CPU pinning: " << pinToCPU << std::endl;
+ std::cout << "Available CPU mask: "
+ << sys::ScopedCPUAffinityUnix().toString() << std::endl;
+
+ if (numThreads > numCPUsAvailable && pinToCPU)
+ {
+ throw except::Exception(
+ Ctxt("Requested more threads than CPUs with pinning enabled"));
+ }
+
+ //-----------------------------------------------------
+ // Run the thread group operations,
+ // pinning if requested
+ //-----------------------------------------------------
+ ThreadGroup threads(pinToCPU);
+ const ThreadPlanner planner(numThreads, numThreads);
+ size_t threadNum = 0;
+ size_t startElement = 0;
+ size_t numElementsThisThread = 0;
+ sys::AtomicCounter threadCounter;
+
+ while(planner.getThreadInfo(threadNum, startElement, numElementsThisThread))
+ {
+ threads.createThread(new MyRunTask(threadNum, threadCounter));
+ ++threadNum;
+ }
+
+ threads.joinAll();
+ }
+
+ catch (const except::Throwable& t)
+ {
+ std::cout << "Exception Caught: " << t.toString() << std::endl;
+ return -1;
+ }
+ catch (...)
+ {
+ std::cout << "Exception Caught!" << std::endl;
+ return -1;
+ }
+
+ return 0;
+}
+
+#else
+
+#include <iostream>
+int main (int, char**)
+{
+ std::cout << "Usable only on *nix systems" << std::endl;
+ return 0;
+}
+
+#endif
+
diff --git a/externals/nitro/externals/coda-oss/modules/c++/std/include/std/bit b/externals/nitro/externals/coda-oss/modules/c++/std/include/std/bit
new file mode 100644
index 000000000..a26eecef3
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/std/include/std/bit
@@ -0,0 +1,48 @@
+/* =========================================================================
+ * This file is part of std-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2021, Maxar Technologies, Inc.
+ *
+ * std-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not, http://www.gnu.org/licenses/.
+ *
+ */
+#pragma once
+#ifndef CODA_OSS_std_bit_INCLUDED_
+#define CODA_OSS_std_bit_INCLUDED_
+
+#include <cstdint>
+
+#include "coda_oss/bit.h"
+#include "coda_oss/CPlusPlus.h"
+
+// Make it (too?) easy for clients to get our various std:: implementations
+#ifndef CODA_OSS_NO_std_byteswap
+ #if CODA_OSS_cpp23
+ #include <bit>
+ #define CODA_OSS_NO_std_byteswap 1 // provided by implementation, probably C++23
+ #endif
+ #ifndef CODA_OSS_NO_std_byteswap
+ #define CODA_OSS_NO_std_byteswap 0 // <= C++20, use our own
+ #endif
+#endif
+
+#if !CODA_OSS_NO_std_byteswap
+namespace std // This is slightly uncouth: we're not supposed to augment "std".
+{
+ using coda_oss::byteswap;
+}
+#endif // CODA_OSS_NO_std_byteswap
+
+#endif // CODA_OSS_std_bit_INCLUDED_
diff --git a/externals/nitro/externals/coda-oss/modules/c++/std/include/std/cstddef b/externals/nitro/externals/coda-oss/modules/c++/std/include/std/cstddef
new file mode 100644
index 000000000..01bfcdd31
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/std/include/std/cstddef
@@ -0,0 +1,27 @@
+/* =========================================================================
+ * This file is part of std-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2021, Maxar Technologies, Inc.
+ *
+ * std-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not, http://www.gnu.org/licenses/.
+ *
+ */
+#pragma once
+#ifndef CODA_OSS_std_cstddef_INCLUDED_
+#define CODA_OSS_std_cstddef_INCLUDED_
+
+#include <cstddef>
+
+#endif // CODA_OSS_std_cstddef_INCLUDED_
diff --git a/externals/nitro/externals/coda-oss/modules/c++/std/include/std/filesystem b/externals/nitro/externals/coda-oss/modules/c++/std/include/std/filesystem
new file mode 100644
index 000000000..4003fa017
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/std/include/std/filesystem
@@ -0,0 +1,28 @@
+/* =========================================================================
+ * This file is part of std-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2021, Maxar Technologies, Inc.
+ *
+ * std-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not, http://www.gnu.org/licenses/.
+ *
+ */
+#pragma once
+#ifndef CODA_OSS_std_filesystem_INCLUDED_
+#define CODA_OSS_std_filesystem_INCLUDED_
+
+#include <filesystem>
+#include "sys/filesystem.h"
+
+#endif // CODA_OSS_std_filesystem_INCLUDED_
\ No newline at end of file
diff --git a/externals/nitro/externals/coda-oss/modules/c++/std/include/std/optional b/externals/nitro/externals/coda-oss/modules/c++/std/include/std/optional
new file mode 100644
index 000000000..60dacfead
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/std/include/std/optional
@@ -0,0 +1,27 @@
+/* =========================================================================
+ * This file is part of sys-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2021, Maxar Technologies, Inc.
+ *
+ * sys-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not, http://www.gnu.org/licenses/.
+ *
+ */
+#pragma once
+#ifndef CODA_OSS_std_optional_INCLUDED_
+#define CODA_OSS_std_optional_INCLUDED_
+
+#include <optional>
+
+#endif // CODA_OSS_std_optional_INCLUDED_
diff --git a/externals/nitro/externals/coda-oss/modules/c++/std/include/std/span b/externals/nitro/externals/coda-oss/modules/c++/std/include/std/span
new file mode 100644
index 000000000..6b78ea9bb
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/std/include/std/span
@@ -0,0 +1,27 @@
+/* =========================================================================
+ * This file is part of std-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2021, Maxar Technologies, Inc.
+ *
+ * std-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not, http://www.gnu.org/licenses/.
+ *
+ */
+#pragma once
+#ifndef CODA_OSS_std_span_INCLUDED_
+#define CODA_OSS_std_span_INCLUDED_
+
+#include <span>
+
+#endif // CODA_OSS_std_span_INCLUDED_
\ No newline at end of file
diff --git a/externals/nitro/externals/coda-oss/modules/c++/std/include/std/string b/externals/nitro/externals/coda-oss/modules/c++/std/include/std/string
new file mode 100644
index 000000000..f8360a0e1
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/std/include/std/string
@@ -0,0 +1,27 @@
+/* =========================================================================
+ * This file is part of std-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2021, Maxar Technologies, Inc.
+ *
+ * std-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not, http://www.gnu.org/licenses/.
+ *
+ */
+#pragma once
+#ifndef CODA_OSS_std_string_INCLUDED_
+#define CODA_OSS_std_string_INCLUDED_
+
+#include <string>
+
+#endif // CODA_OSS_std_string_INCLUDED_
\ No newline at end of file
diff --git a/externals/nitro/externals/coda-oss/modules/c++/sys/include/sys/filesystem.h b/externals/nitro/externals/coda-oss/modules/c++/sys/include/sys/filesystem.h
new file mode 100644
index 000000000..479532f23
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/sys/include/sys/filesystem.h
@@ -0,0 +1,47 @@
+/* =========================================================================
+ * This file is part of sys-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2020, Maxar Technologies, Inc.
+ *
+ * sys-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not, http://www.gnu.org/licenses/.
+ *
+ */
+#pragma once
+#ifndef CODA_OSS_sys_filesystem_h_INCLUDED_
+#define CODA_OSS_sys_filesystem_h_INCLUDED_
+
+#include <filesystem>
+
+// always implement sys::filesystem::path
+#include "sys/sys_filesystem.h"
+
+#include "coda_oss/namespace_.h"
+namespace coda_oss
+{
+namespace filesystem
+{
+
+using std::filesystem::path;
+using std::filesystem::file_type;
+
+using std::filesystem::current_path;
+using std::filesystem::exists;
+using std::filesystem::is_directory;
+using std::filesystem::is_regular_file;
+
+}
+}
+
+#endif // CODA_OSS_sys_filesystem_h_INCLUDED_
diff --git a/externals/nitro/externals/coda-oss/modules/c++/sys/include/sys/sys_filesystem.h b/externals/nitro/externals/coda-oss/modules/c++/sys/include/sys/sys_filesystem.h
new file mode 100644
index 000000000..37bd740a2
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/sys/include/sys/sys_filesystem.h
@@ -0,0 +1,46 @@
+/* =========================================================================
+ * This file is part of sys-c++
+ * =========================================================================
+ *
+ * (C) Copyright 2020, Maxar Technologies, Inc.
+ *
+ * sys-c++ is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; If not, http://www.gnu.org/licenses/.
+ *
+ */
+#ifndef CODA_OSS_sys_sys_filesystem_h_INCLUDED_
+#define CODA_OSS_sys_sys_filesystem_h_INCLUDED_
+#pragma once
+
+#include <filesystem>
+
+namespace sys
+{
+namespace filesystem
+{
+ using std::filesystem::file_type;
+ using std::filesystem::path;
+
+ using std::filesystem::absolute;
+ using std::filesystem::create_directory;
+ using std::filesystem::current_path;
+ using std::filesystem::exists;
+ using std::filesystem::is_directory;
+ using std::filesystem::is_regular_file;
+ using std::filesystem::remove;
+ using std::filesystem::temp_directory_path;
+ using std::filesystem::file_size;
+}
+}
+
+#endif // CODA_OSS_sys_sys_filesystem_h_INCLUDED_
diff --git a/externals/nitro/externals/coda-oss/modules/c++/sys/source/sys_filesystem.cpp b/externals/nitro/externals/coda-oss/modules/c++/sys/source/sys_filesystem.cpp
new file mode 100644
index 000000000..8d91933a7
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/c++/sys/source/sys_filesystem.cpp
@@ -0,0 +1 @@
+#include "sys/sys_filesystem.h"
diff --git a/externals/nitro/externals/coda-oss/modules/drivers/hdf5/CMakeLists.txt b/externals/nitro/externals/coda-oss/modules/drivers/hdf5/CMakeLists.txt
new file mode 100644
index 000000000..406eb8411
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/modules/drivers/hdf5/CMakeLists.txt
@@ -0,0 +1,9 @@
+set(MODULE_NAME hdf5)
+set(TARGET_LANGUAGE c++)
+
+if(CODA_ENABLE_HDF5)
+ set(TARGET_LANGUAGE c++)
+ coda_add_module(${MODULE_NAME} VERSION 1.13.2)
+else()
+ message("${MODULE_NAME} will not be built since HDF5 is not enabled")
+endif()
diff --git a/externals/nitro/externals/coda-oss/test_package/CMakeLists.txt b/externals/nitro/externals/coda-oss/test_package/CMakeLists.txt
new file mode 100644
index 000000000..f3c77f2c9
--- /dev/null
+++ b/externals/nitro/externals/coda-oss/test_package/CMakeLists.txt
@@ -0,0 +1,14 @@
+cmake_minimum_required(VERSION 3.14)
+project(coda-oss-test_package)
+set(CMAKE_CXX_STANDARD 20)
+set(CXX_STANDARD_REQUIRED true)
+
+include("${CMAKE_BINARY_DIR}/conanbuildinfo.cmake")
+conan_basic_setup()
+
+#import targets
+include("${CMAKE_BINARY_DIR}/conan_paths.cmake")
+find_package(coda-oss)
+
+add_executable(test test.cpp)
+target_link_libraries(test types-c++ str-c++)