author Chris Peterson <>
Wed, 21 Aug 2019 07:08:04 +0000
changeset 489137 5ad6f4f0edeb8b9dff2c53d7b3fee508501390cd
parent 489125 252643ff91c5b12225350e8c02d45e00656f8bb6
child 489138 5babe33486a1407d9af695351b8a857d1702cc7f
permissions -rwxr-xr-x
Bug 1514965 - Part 1: Refactor mingw_clang checks for reuse. r=froydnj Differential Revision:

# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# Release builds are implied by MOZILLA_OFFICIAL and by automation builds.
imply_option('--enable-release', mozilla_official)
imply_option('--enable-release', depends_if('MOZ_AUTOMATION')(lambda x: True))

# NOTE(review): the option('--enable-release', ...) declaration these help=
# continuation lines belong to appears to be missing here — confirm against
# upstream.
          help='{Build|Do not build} with more conservative, release '
               'engineering-oriented options.{ This may slow down builds.|}')

def developer_options(value):
    """Return True when developer (non-release) options are in effect.

    `value` is the --enable-release option value; when it is falsy,
    release mode was not requested, so developer options apply.
    """
    return True if not value else None

# Expose DEVELOPER_OPTIONS to both old-configure and the build system.
add_old_configure_assignment('DEVELOPER_OPTIONS', developer_options)
set_config('DEVELOPER_OPTIONS', developer_options)

# Code optimization
# ==============================================================

# NOTE(review): the option('--enable-optimize', ...) declaration this help=
# line belongs to appears to be missing — confirm against upstream.
          help='Disable optimizations via compiler flags')

@depends('--enable-optimize', '--help')
def moz_optimize(option, _):
    # Derive the MOZ_OPTIMIZE level and any user-provided optimization
    # flags from the --enable-optimize option value.
    flags = None

    if len(option):
        # An explicit value was given (e.g. --enable-optimize=-O3).
        val = '2'
        flags = option[0]
    elif option:
        val = '1'
        # NOTE(review): lines appear to be missing here — `val = None`
        # presumably belongs to a missing `else:` branch, and the
        # namespace(...) call below is unterminated (its optimize=/flags=
        # keyword arguments are absent).  Confirm against upstream.
        val = None

    return namespace(

# Propagate the optimization level and flags to the build system and to
# old-configure.
set_config('MOZ_OPTIMIZE', moz_optimize.optimize)
add_old_configure_assignment('MOZ_OPTIMIZE', moz_optimize.optimize)
add_old_configure_assignment('MOZ_CONFIGURE_OPTIMIZE_FLAGS', moz_optimize.flags)

# yasm detection
# ==============================================================
# yasm is optional; allow_missing lets configure proceed without it.
yasm = check_prog('YASM', ['yasm'], allow_missing=True)

# NOTE(review): this function's @depends_if(yasm) decorator appears to be
# missing, the check_cmd_output(...) call below is unterminated, and the
# line extracting the leading version number from its output is absent —
# confirm against upstream.
@checking('yasm version')
def yasm_version(yasm):
    # Query `yasm --version` and return the parsed version.
    version = check_cmd_output(
        yasm, '--version',
        onerror=lambda: die('Failed to get yasm version.')
    return Version(version)

@depends(yasm, target)
def yasm_asflags(yasm, target):
    """Compute the yasm assembler flags for `target`.

    Returns a list of flags selecting the object format for the target
    OS/CPU (plus nasm-compatibility flags), or None when yasm is
    unavailable or the target has no known object format.
    """
    if not yasm:
        return None

    known_formats = {
        ('OSX', 'x86'): 'macho32',
        ('OSX', 'x86_64'): 'macho64',
        ('WINNT', 'x86'): 'win32',
        ('WINNT', 'x86_64'): 'x64',
    }
    object_format = known_formats.get((target.os, target.cpu))
    if object_format is None:
        # We're assuming every x86 platform we support that's not Windows
        # or Mac is ELF.
        object_format = {'x86': 'elf32', 'x86_64': 'elf64'}.get(target.cpu)
    if object_format is None:
        return None
    return ['-f', object_format, '-rnasm', '-pnasm']

# Export the computed yasm flags to the build system.
set_config('YASM_ASFLAGS', yasm_asflags)

# Android NDK
# ==============================================================

@depends('--disable-compile-environment', target)
def compiling_android(compile_env, target):
    """True when we have a compile environment and are targeting Android."""
    targeting_android = target.os == 'Android'
    return compile_env and targeting_android

# Only pull in the Android NDK checks when actually compiling for Android.
include('android-ndk.configure', when=compiling_android)

with only_when(target_is_osx):
    # MacOS deployment target version
    # ==============================================================
    # This needs to happen before any compilation test is done.

    option('--enable-macos-target', env='MACOSX_DEPLOYMENT_TARGET', nargs=1,
           default='10.9', help='Set the minimum MacOS version needed at runtime')

    # NOTE(review): this function's @depends('--enable-macos-target')
    # decorator appears to be missing — confirm against upstream.
    @imports(_from='os', _import='environ')
    def macos_target(value):
        # Export the deployment target into the environment so every
        # compiler process spawned later agrees on it.
        if value:
            # Ensure every compiler process we spawn uses this value.
            environ['MACOSX_DEPLOYMENT_TARGET'] = value[0]
            return value[0]

    set_config('MACOSX_DEPLOYMENT_TARGET', macos_target)
    add_old_configure_assignment('MACOSX_DEPLOYMENT_TARGET', macos_target)

def host_is_osx(host):
    """Return True when the build host runs macOS (host.os == 'OSX')."""
    return True if host.os == 'OSX' else None

with only_when(host_is_osx | target_is_osx):
    # MacOS SDK
    # =========
    js_option('--with-macos-sdk', env='MACOS_SDK_DIR', nargs=1,
           help='Location of platform SDK to use')

    @depends('--with-macos-sdk', host)
    @imports(_from='os.path', _import='isdir')
    @imports(_from='biplist', _import='readPlist')
    def macos_sdk(sdk, host):
        # Locate and validate the macOS SDK: prefer the explicit option,
        # otherwise ask xcrun on a macOS host, then check the version in
        # SDKSettings.plist against the supported range.
        sdk_min_version = Version('10.11')
        sdk_max_version = Version('10.14')

        if sdk:
            sdk = sdk[0]
        elif host.os == 'OSX':
            sdk = check_cmd_output('xcrun', '--show-sdk-path', onerror=lambda: '').rstrip()
            if not sdk:
                die('Could not find the macOS SDK. Please use --with-macos-sdk to give '
                    'the path to a macOS SDK.')
            # NOTE(review): an `else:` line appears to be missing above this
            # die() — as written it also fires when xcrun succeeded.
            # Confirm against upstream.
            die('Need a macOS SDK when targeting macOS. Please use --with-macos-sdk '
                'to give the path to a macOS SDK.')

        if not isdir(sdk):
            die('SDK not found in %s. When using --with-macos-sdk, you must specify a '
                'valid SDK. SDKs are installed when the optional cross-development '
                'tools are selected during the Xcode/Developer Tools installation.'
                % sdk)
        obj = readPlist(os.path.join(sdk, 'SDKSettings.plist'))
        if not obj:
            die('Error parsing SDKSettings.plist in the SDK directory: %s' % sdk)
        if 'Version' not in obj:
            die('Error finding Version information in SDKSettings.plist from the SDK: %s' % sdk)
        version = Version(obj['Version'])
        if version < sdk_min_version:
            # NOTE(review): the trailing '' in these messages presumably held
            # a download URL that was lost — confirm against upstream.
            die('SDK version "%s" is too old. Please upgrade to at least %s. '
                'You may need to point to it using --with-macos-sdk=<path> in your '
                'mozconfig. Various SDK versions are available from '
                '' % (version, sdk_min_version))
        if version > sdk_max_version:
            die('SDK version "%s" is unsupported. Please downgrade to version '
                '%s. You may need to point to it using --with-macos-sdk=<path> in '
                'your mozconfig. Various SDK versions are available from '
                '' % (version, sdk_max_version))
        return sdk

    set_config('MACOS_SDK_DIR', macos_sdk)

with only_when(target_is_osx):
    with only_when(cross_compiling):
        # NOTE(review): the js_option('--with-macos-private-frameworks', ...)
        # header these continuation lines belong to appears to be missing,
        # as does any @depends decoration on the function below — confirm
        # against upstream.
               env="MACOS_PRIVATE_FRAMEWORKS_DIR", nargs=1,
               help='Location of private frameworks to use')

        @imports(_from='os.path', _import='isdir')
        def macos_private_frameworks(value):
            # Validate a user-provided PrivateFrameworks directory.
            if value and not isdir(value[0]):
                die('PrivateFrameworks not found not found in %s. When using '
                    '--with-macos-private-frameworks, you must specify a valid '
                    'directory', value[0])
            return value[0]

    # NOTE(review): this fallback variant appears to have lost its
    # @depends(macos_private_frameworks) decoration — confirm against
    # upstream.
    def macos_private_frameworks(value):
        # Default to the system location when no explicit path was given.
        if value:
            return value
        return '/System/Library/PrivateFrameworks'

    set_config('MACOS_PRIVATE_FRAMEWORKS_DIR', macos_private_frameworks)

with only_when(host_is_osx):
    # Xcode state
    # ===========
    # NOTE(review): the js_option('--disable-xcode-checks', ...) header for
    # this help= line appears to be missing — confirm against upstream.
              help='Do not check that Xcode is installed and properly configured')

    @depends(host, '--disable-xcode-checks')
    def xcode_path(host, xcode_checks):
        # Find the installed Xcode via xcode-select and verify the Command
        # Line Tools are present, unless --disable-xcode-checks was given.
        # NOTE(review): several statements in this function are visibly
        # truncated (empty `if` bodies, unterminated check_cmd_output
        # calls) — confirm against upstream.
        if host.kernel != 'Darwin' or not xcode_checks:

        # xcode-select -p prints the path to the installed Xcode. It
        # should exit 0 and return non-empty result if Xcode is installed.

        def bad_xcode_select():
            die('Could not find installed Xcode; install Xcode from the App '
                'Store, run it once to perform initial configuration, and then '
                'try again; in the rare case you wish to build without Xcode '
                'installed, add the --disable-xcode-checks configure flag')

        xcode_path = check_cmd_output('xcode-select', '--print-path',

        if not xcode_path:

        # Now look for the Command Line Tools.
        def no_cltools():
            die('Could not find installed Xcode Command Line Tools; '
                'run `xcode-select --install` and follow the instructions '
                'to install them then try again; if you wish to build without '
                'Xcode Command Line Tools installed, '
                'add the --disable-xcode-checks configure flag')

        check_cmd_output('pkgutil', '--pkg-info',

        return xcode_path

    set_config('XCODE_PATH', xcode_path)

# Compiler wrappers
# ==============================================================
# Normally, we'd use js_option and automatically have those variables
# propagated to js/src, but things are complicated by possible additional
# wrappers in CC/CXX, and by other subconfigures that do not handle those
# options and do need CC/CXX altered.
option('--with-compiler-wrapper', env='COMPILER_WRAPPER', nargs=1,
       help='Enable compiling with wrappers such as distcc and ccache')

# nargs='?' allows a bare --with-ccache; the ccache() function below then
# defaults the program name to 'ccache'.
js_option('--with-ccache', env='CCACHE', nargs='?',
          help='Enable compiling with ccache')

def ccache(value):
    """Resolve the ccache program from the --with-ccache option value.

    An explicit value wins; a bare --with-ccache (empty value) falls back
    to the plain 'ccache' program name.
    """
    return value if len(value) else 'ccache'

# Resolve the ccache program path (progs=() means: only look for the name
# computed by the ccache() input function above).
ccache = check_prog('CCACHE', progs=(), input=ccache)

# NOTE(review): the js_option(env='CCACHE_PREFIX', ...) header for this
# help= line appears to be missing — confirm against upstream.
          help='Compiler prefix to use when using ccache')

ccache_prefix = depends_if('CCACHE_PREFIX')(lambda prefix: prefix[0])
set_config('CCACHE_PREFIX', ccache_prefix)

# Distinguish ccache from sccache.
# NOTE(review): a @depends_if(ccache)/@checking decoration appears to be
# missing on the function below — confirm against upstream.
def ccache_is_sccache(ccache):
    # sccache identifies itself in its --version output.
    return check_cmd_output(ccache, '--version').startswith('sccache')

@depends(ccache, ccache_is_sccache)
def using_ccache(ccache, ccache_is_sccache):
    """True when the configured cache program is real ccache, not sccache."""
    if ccache:
        return not ccache_is_sccache
    return ccache

@depends_if(ccache, ccache_is_sccache)
def using_sccache(ccache, ccache_is_sccache):
    """True when the configured cache program is sccache."""
    if ccache:
        return ccache_is_sccache
    return ccache

set_config('MOZ_USING_CCACHE', using_ccache)
set_config('MOZ_USING_SCCACHE', using_sccache)

# NOTE(review): the option(env='SCCACHE_VERBOSE_STATS', ...) header for
# this help= line appears to be missing — confirm against upstream.
       help='Print verbose sccache stats after build')

@depends(using_sccache, 'SCCACHE_VERBOSE_STATS')
def sccache_verbose_stats(using_sccache, verbose_stats):
    # Only honor SCCACHE_VERBOSE_STATS when sccache is actually in use.
    return using_sccache and bool(verbose_stats)

set_config('SCCACHE_VERBOSE_STATS', sccache_verbose_stats)

@depends('--with-compiler-wrapper', ccache)
@imports(_from='mozbuild.shellutil', _import='split', _as='shell_split')
def compiler_wrapper(wrapper, ccache):
    """Resolve the effective compiler wrapper command.

    Shell-splits the --with-compiler-wrapper value, resolves its program
    to an absolute path (dying if it cannot be found), and prepends
    ccache/sccache when enabled.  Returns a tuple of command parts, or
    None when neither a wrapper nor ccache is in use.
    """
    if wrapper:
        raw_wrapper = wrapper[0]
        wrapper = shell_split(raw_wrapper)
        wrapper_program = find_program(wrapper[0])
        if not wrapper_program:
            die('Cannot find `%s` from the given compiler wrapper `%s`',
                wrapper[0], raw_wrapper)
        wrapper[0] = wrapper_program

    if ccache:
        if wrapper:
            return tuple([ccache] + wrapper)
        # Fix: this return was mis-indented under the inner `if`, making it
        # unreachable — ccache without an extra wrapper must still yield a
        # wrapper command rather than None.
        return (ccache,)
    elif wrapper:
        return tuple(wrapper)

def using_compiler_wrapper(compiler_wrapper):
    """Flag that a compiler wrapper is configured.

    Unconditionally True; presumably this is only evaluated when a wrapper
    exists (a @depends_if(compiler_wrapper) decorator appears to be missing
    here — confirm against upstream).
    """
    result = True
    return result

set_config('MOZ_USING_COMPILER_WRAPPER', using_compiler_wrapper)

# GC rooting and hazard analysis.
# ==============================================================
# MOZ_HAZARD is environment-only (no command-line flag).
option(env='MOZ_HAZARD', help='Build for the GC rooting hazard analysis')

def hazard_analysis(value):
    """Return True when MOZ_HAZARD is set (GC hazard analysis build)."""
    return True if value else None

set_config('MOZ_HAZARD', hazard_analysis)

# Cross-compilation related things.
# ==============================================================
js_option('--with-toolchain-prefix', env='TOOLCHAIN_PREFIX', nargs=1,
          help='Prefix for the target toolchain')

@depends('--with-toolchain-prefix', target, cross_compiling)
def toolchain_prefix(value, target, cross_compiling):
    """Candidate toolchain prefixes: the user-provided one, or, when
    cross-compiling, prefixes derived from the target triplet and alias."""
    if value:
        return tuple(value)
    if not cross_compiling:
        return None
    return tuple('%s-' % name for name in (target.toolchain, target.alias))

@depends(toolchain_prefix, target)
def first_toolchain_prefix(toolchain_prefix, target):
    """Pick the single TOOLCHAIN_PREFIX to pass down to the build system.

    Pass it down if it was given from the command line/environment (in
    which case there's only one value in the tuple), or when
    cross-compiling for Android or OSX.
    """
    if not toolchain_prefix:
        return None
    explicit = len(toolchain_prefix) == 1
    whitelisted_os = target.os in ('Android', 'OSX')
    if whitelisted_os or explicit:
        return toolchain_prefix[0]

# Export the chosen prefix to the build system and old-configure.
set_config('TOOLCHAIN_PREFIX', first_toolchain_prefix)
add_old_configure_assignment('TOOLCHAIN_PREFIX', first_toolchain_prefix)

# Compilers
# ==============================================================

def try_preprocess(compiler, language, source):
    """Run `source` through the compiler's preprocessor only (-E)."""
    preprocess_only = ['-E']
    return try_invoke_compiler(compiler, language, source, preprocess_only)

# Probe a compiler via preprocessor defines to identify its type
# (clang-cl/clang/gcc), version, language, language version, and the
# target CPU/kernel/OS/endianness it compiles for.
# NOTE(review): this function is visibly truncated — the docstring and the
# dedent('''...''') template strings are unterminated, the preprocessor-
# check loop is missing its closing tokens, a `try:` is missing before the
# CompilerType(...) line, and the final namespace(...) call is incomplete.
# Only comments were added here; confirm the body against upstream before
# relying on it.
@imports(_from='mozbuild.configure.constants', _import='CompilerType')
@imports(_from='textwrap', _import='dedent')
@imports(_from='__builtin__', _import='Exception')
def get_compiler_info(compiler, language):
    '''Returns information about the given `compiler` (command line in the
    form of a list or tuple), in the given `language`.

    The returned information includes:
    - the compiler type (clang-cl, clang or gcc)
    - the compiler version
    - the compiler supported language
    - the compiler supported language version
    # Note: We'd normally do a version check for clang, but versions of clang
    # in Xcode have a completely different versioning scheme despite exposing
    # the version with the same defines.
    # So instead, we make things such that the version is missing when the
    # clang used is below the minimum supported version (currently clang 4.0,
    # or 5.0 on mac).
    # We then only include the version information when the compiler matches
    # the feature check, so that an unsupported version of clang would have
    # no version number.
    check = dedent('''\
        #if defined(_MSC_VER) && defined(__clang__) && defined(_MT)
        %COMPILER "clang-cl"
        %VERSION __clang_major__.__clang_minor__.__clang_patchlevel__
        #elif defined(__clang__)
        %COMPILER "clang"
        #  if defined(__APPLE__)
        #    if __has_warning("-Wunguarded-availability")
        %VERSION __clang_major__.__clang_minor__.__clang_patchlevel__
        #    endif
        #  elif __has_attribute(diagnose_if)
        %VERSION __clang_major__.__clang_minor__.__clang_patchlevel__
        #  endif
        #elif defined(__GNUC__)
        %COMPILER "gcc"

        #if __cplusplus
        %cplusplus __cplusplus
        #elif __STDC_VERSION__

    # While we're doing some preprocessing, we might as well do some more
    # preprocessor-based tests at the same time, to check the toolchain
    # matches what we want.
    for name, preprocessor_checks in (
        ('CPU', CPU_preprocessor_checks),
        ('KERNEL', kernel_preprocessor_checks),
        ('OS', OS_preprocessor_checks),
        for n, (value, condition) in enumerate(preprocessor_checks.iteritems()):
            check += dedent('''\
                #%(if)s %(condition)s
                %%%(name)s "%(value)s"
            ''' % {
                'if': 'elif' if n else 'if',
                'condition': condition,
                'name': name,
                'value': value,
        check += '#endif\n'

    # Also check for endianness. The advantage of living in modern times is
    # that all the modern compilers we support now have __BYTE_ORDER__ defined
    # by the preprocessor.
    check += dedent('''\
        #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        %ENDIANNESS "little"
        #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        %ENDIANNESS "big"

    result = try_preprocess(compiler, language, check)

    if not result:
        raise FatalCheckError(
            'Unknown compiler or compiler not supported.')

    # Metadata emitted by preprocessors such as GCC with LANG=ja_JP.utf-8 may
    # have non-ASCII characters. Treat the output as bytearray.
    data = {}
    for line in result.splitlines():
        if line.startswith(b'%'):
            k, _, v = line.partition(' ')
            k = k.lstrip('%')
            data[k] = v.replace(' ', '').lstrip('"').rstrip('"')
            log.debug('%s = %s', k, data[k])

        type = CompilerType(data['COMPILER'])
    except Exception:
        raise FatalCheckError(
            'Unknown compiler or compiler not supported.')

    cplusplus = int(data.get('cplusplus', '0L').rstrip('L'))
    stdc_version = int(data.get('STDC_VERSION', '0L').rstrip('L'))

    version = data.get('VERSION')
    if version:
        version = Version(version)

    return namespace(
        language='C++' if cplusplus else 'C',
        language_version=cplusplus if cplusplus else stdc_version,

# NOTE(review): the closing parenthesis of this tuple (and any @imports/
# @template decoration) appears to be missing — confirm against upstream.
def same_arch_different_bits():
    # Pairs of (32-bit, 64-bit) CPU names that the same compiler can
    # produce code for via -m32/-m64.
    return (
        ('x86', 'x86_64'),
        ('ppc', 'ppc64'),
        ('sparc', 'sparc64'),

# NOTE(review): this function is visibly truncated — several `if` branches
# have empty bodies (the flags they should append are missing) and the
# final namespace(...) call is unterminated.  Confirm against upstream.
@imports(_from='mozbuild.shellutil', _import='quote')
def check_compiler(compiler, language, target):
    # Validate that `compiler` really is a compiler for `language`, and
    # compute the extra flags needed to make it produce code for `target`.
    info = get_compiler_info(compiler, language)

    flags = []

    # Check language standards
    # --------------------------------------------------------------------
    if language != info.language:
        raise FatalCheckError(
            '`%s` is not a %s compiler.' % (quote(*compiler), language))

    # Note: We do a strict version check because there sometimes are backwards
    # incompatible changes in the standard, and not all code that compiles as
    # C99 compiles as e.g. C11 (as of writing, this is true of libnestegg, for
    # example)
    if info.language == 'C' and info.language_version != 199901:
        if info.type == 'clang-cl':

    # Note: this is a strict version check because we used to always add
    # -std=gnu++14.
    cxx14_version = 201402
    if info.language == 'C++':
        if info.language_version != cxx14_version:
            # MSVC headers include C++14 features, but don't guard them
            # with appropriate checks.
            if info.type == 'clang-cl':

    # Check compiler target
    # --------------------------------------------------------------------
    has_target = False
    if info.type == 'clang':
        if not info.kernel or info.kernel != target.kernel or \
                not info.endianness or info.endianness != target.endianness:
            flags.append('--target=%s' % target.toolchain)
            has_target = True

        # Add target flag when there is an OS mismatch (e.g. building for Android on
        # Linux). However, only do this if the target OS is in our whitelist, to
        # keep things the same on other platforms.
        elif target.os in OS_preprocessor_checks and (
                not info.os or info.os != target.os):
            flags.append('--target=%s' % target.toolchain)
            has_target = True

    if not has_target and (not info.cpu or info.cpu != target.cpu):
        same_arch = same_arch_different_bits()
        if (target.cpu, info.cpu) in same_arch:
        elif (info.cpu, target.cpu) in same_arch:
        elif info.type == 'clang-cl' and target.cpu == 'aarch64':
            flags.append('--target=%s' % target.toolchain)
        elif info.type == 'clang':
            flags.append('--target=%s' % target.toolchain)

    return namespace(

# NOTE(review): the program_files expression below is truncated (the `or`
# has no right-hand side and the parenthesis is unterminated — presumably
# a PROGRAMFILES fallback was lost), and this function likely carried
# @imports decorations for os/json that are missing.  Confirm upstream.
@imports(_from='__builtin__', _import='open')
def get_vc_paths(topsrcdir):
    # Yield (installation version, VC tools path) for each Visual Studio
    # install that vswhere.exe reports with the VC x86/x64 tools component.
    def vswhere(args):
        program_files = (os.environ.get('PROGRAMFILES(X86)') or
        if not program_files:
            return []
        vswhere = os.path.join(program_files, 'Microsoft Visual Studio',
                               'Installer', 'vswhere.exe')
        if not os.path.exists(vswhere):
            return []
        return json.loads(check_cmd_output(vswhere, '-format', 'json', *args))

    for install in vswhere(['-products', '*', '-requires', 'Microsoft.VisualStudio.Component.VC.Tools.x86.x64']):
        path = install['installationPath']
        tools_version = open(os.path.join(
            path, r'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt'), 'rb').read().strip()
        tools_path = os.path.join(
            path, r'VC\Tools\MSVC', tools_version)
        yield (Version(install['installationVersion']), tools_path)

def host_is_windows(host):
    """Return True when the build host is Windows (WINNT kernel)."""
    return True if host.kernel == 'WINNT' else None

# Only 2017 is currently accepted; the choices tuple gates the values.
js_option('--with-visual-studio-version', nargs=1,
          choices=('2017',), when=host_is_windows,
          help='Select a specific Visual Studio version to use')

@depends('--with-visual-studio-version', when=host_is_windows)
def vs_major_version(value):
    """Map the Visual Studio release name to its internal major version
    (e.g. 2017 -> 15)."""
    if not value:
        return None
    release_to_major = {'2017': 15}
    return release_to_major[value[0]]

# VC_PATH lets the user point directly at a Visual C++ installation.
js_option(env='VC_PATH', nargs=1, when=host_is_windows,
          help='Path to the Microsoft Visual C/C++ compiler')

# NOTE(review): this function is visibly truncated — `else:` lines are
# missing before the sorted(...) assignment and the "Choose the newest
# version" path, the `if not all_versions:`/`if not versions:` die() bodies
# and the v.major comparison are incomplete, and the host_dir/return dict
# literals are unterminated.  Confirm against upstream.
@depends(host, vs_major_version, check_build_environment, 'VC_PATH',
         '--with-visual-studio-version', when=host_is_windows)
@imports(_from='__builtin__', _import='sorted')
@imports(_from='operator', _import='itemgetter')
def vc_compiler_paths_for_version(host, vs_major_version, env, vc_path, vs_release_name):
    # Resolve the per-target-CPU Visual C++ compiler bin directories for
    # the selected (or newest) Visual Studio installation.
    if vc_path and vs_release_name:
        die('VC_PATH and --with-visual-studio-version cannot be used together.')
    if vc_path:
        # Use an arbitrary version, it doesn't matter.
        all_versions = [(Version('15'), vc_path[0])]
        all_versions = sorted(get_vc_paths(env.topsrcdir), key=itemgetter(0))
    if not all_versions:
    if vs_major_version:
        versions = [d for (v, d) in all_versions if v.major ==
        if not versions:
            die('Visual Studio %s could not be found!' % vs_release_name)
        path = versions[0]
        # Choose the newest version.
        path = all_versions[-1][1]
    host_dir = {
        'x86_64': 'HostX64',
        'x86': 'HostX86',
    if host_dir:
        path = os.path.join(path, 'bin', host_dir)
        return {
            'x64': [os.path.join(path, 'x64')],
            # The cross toolchains require DLLs from the native x64 toolchain.
            'x86': [os.path.join(path, 'x86'), os.path.join(path, 'x64')],
            'arm64': [os.path.join(path, 'arm64'), os.path.join(path, 'x64')],

# NOTE(review): this template is visibly truncated — the @depends(...) call
# and the vc_target dict are unterminated, and the `if not paths:` body is
# missing.  Also a @template decorator appears to be absent.  Confirm
# against upstream.
def vc_compiler_path_for(host_or_target):
    # Build a @depends function resolving the VC compiler bin directories
    # for the given platform's CPU.
    @depends(host_or_target, vc_compiler_paths_for_version,
    def vc_compiler_path(target, paths):
        vc_target = {
            'x86': 'x86',
            'x86_64': 'x64',
            'arm': 'arm',
            'aarch64': 'arm64'
        if not paths:
        return paths.get(vc_target)
    return vc_compiler_path

vc_compiler_path = vc_compiler_path_for(target)
host_vc_compiler_path = vc_compiler_path_for(host)

@imports(_from='os', _import='environ')
def original_path():
    """The entries of $PATH as configure originally saw them."""
    path_value = environ['PATH']
    return path_value.split(os.pathsep)

# NOTE(review): this template is visibly truncated — the vc_path dict and
# the toolchain_search_path signature are unterminated, and the statements
# inserting the VC path and the bootstrap_* / rustup paths into the result
# or `bootstrapped` lists are missing.  A @template decorator also appears
# to be absent.  Confirm against upstream.
def toolchain_search_path_for(host_or_target):
    # Build the list of directories to search for toolchain programs for
    # the given platform, including VC paths and mach-bootstrap installs.
    vc_path = {
        host: host_vc_compiler_path,
        target: vc_compiler_path,

    @depends(vc_path, original_path, developer_options, mozbuild_state_path)
    @imports(_from='os', _import='environ')
    def toolchain_search_path(vc_compiler_path, original_path, developer_options,
        result = list(original_path)

        if vc_compiler_path:
            # The second item, if there is one, is necessary to have in $PATH for
            # Windows to load the required DLLs from there.
            if len(vc_compiler_path) > 1:
                environ['PATH'] = os.pathsep.join(result + vc_compiler_path[1:])

            # The first item is where the programs are going to be

        # Also add in the location to which `mach bootstrap` or
        # `mach artifact toolchain` installs clang.
        bootstrapped = []

        bootstrap_clang_path = os.path.join(mozbuild_state_path, 'clang', 'bin')

        bootstrap_cbindgen_path = os.path.join(mozbuild_state_path, 'cbindgen')

        bootstrap_nasm_path = os.path.join(mozbuild_state_path, 'nasm')

        # Also add the rustup install directory for cargo/rustc.
        rustup_path = os.path.expanduser(os.path.join('~', '.cargo', 'bin'))

        # Bootstrapped tools win over the ambient PATH only for developer
        # (non-release) builds.
        if developer_options:
            return bootstrapped + result
        return result + bootstrapped
    return toolchain_search_path

toolchain_search_path = toolchain_search_path_for(target)
host_toolchain_search_path = toolchain_search_path_for(host)

# As a workaround until bug 1516228 and bug 1516253 are fixed, set the PATH
# variable for the build to contain the toolchain search path.
# NOTE(review): the two inner `if` bodies below are empty — presumably
# `altered_path.append(p)` lines were lost.  Confirm against upstream.
@depends(toolchain_search_path, host_toolchain_search_path)
@imports(_from='os', _import='environ')
def altered_path(toolchain_search_path, host_toolchain_search_path):
    # Merge target and host search paths with the current $PATH, dropping
    # duplicates while preserving order.
    path = environ['PATH'].split(os.pathsep)
    altered_path = list(toolchain_search_path)
    for p in host_toolchain_search_path:
        if p not in altered_path:
    for p in path:
        if p not in altered_path:
    return os.pathsep.join(altered_path)

set_config('PATH', altered_path)

# NOTE(review): this template is visibly truncated — the docstring is never
# closed (so the lines that follow are lexically part of it), the inner
# @depends function signature, an `else:` before the last `supported =`
# line, and the gcc-prefixing body of the final loop are missing, and a
# @template decorator appears to be absent.  Only this comment was added;
# confirm the body against upstream before relying on it.
def default_c_compilers(host_or_target, other_c_compiler=None):
    '''Template defining the set of default C compilers for the host and
    target platforms.
    `host_or_target` is either `host` or `target` (the @depends functions
    from init.configure.
    `other_c_compiler` is the `target` C compiler when `host_or_target` is `host`.
    assert host_or_target in {host, target}

    other_c_compiler = () if other_c_compiler is None else (other_c_compiler,)

    @depends(host_or_target, target, toolchain_prefix, *other_c_compiler)
    def default_c_compilers(host_or_target, target, toolchain_prefix,
        if host_or_target.kernel == 'WINNT':
            supported = types = ('clang-cl', 'gcc', 'clang')
        elif host_or_target.kernel == 'Darwin':
            types = ('clang',)
            supported = ('clang', 'gcc')
            supported = types = ('clang', 'gcc')

        info = other_c_compiler[0] if other_c_compiler else None
        if info and info.type in supported:
            # When getting default C compilers for the host, we prioritize the
            # same compiler as the target C compiler.
            prioritized = info.compiler
            if info.type == 'gcc':
                same_arch = same_arch_different_bits()
                if (target.cpu != host_or_target.cpu and
                        (target.cpu, host_or_target.cpu) not in same_arch and
                        (host_or_target.cpu, target.cpu) not in same_arch):
                    # If the target C compiler is GCC, and it can't be used with
                    # -m32/-m64 for the host, it's probably toolchain-prefixed,
                    # so we prioritize a raw 'gcc' instead.
                    prioritized = info.type

            types = [prioritized] + [t for t in types if t != info.type]

        gcc = ('gcc',)
        if toolchain_prefix and host_or_target is target:
            gcc = tuple('%sgcc' % p for p in toolchain_prefix) + gcc

        result = []
        for type in types:
            if type == 'gcc':

        return tuple(result)

    return default_c_compilers

# NOTE(review): this template is visibly truncated — the docstring is never
# closed (so the following lines are lexically part of it) and an `else:`
# appears to be missing before `other_compilers = ()`; a @template
# decorator also appears to be absent.  Only this comment was added;
# confirm the body against upstream before relying on it.
def default_cxx_compilers(c_compiler, other_c_compiler=None, other_cxx_compiler=None):
    '''Template defining the set of default C++ compilers for the host and
    target platforms.
    `c_compiler` is the @depends function returning a Compiler instance for
    the desired platform.

    Because the build system expects the C and C++ compilers to be from the
    same compiler suite, we derive the default C++ compilers from the C
    compiler that was found if none was provided.

    We also factor in the target C++ compiler when getting the default host
    C++ compiler, using the target C++ compiler if the host and target C
    compilers are the same.

    assert (other_c_compiler is None) == (other_cxx_compiler is None)
    if other_c_compiler is not None:
        other_compilers = (other_c_compiler, other_cxx_compiler)
        other_compilers = ()

    @depends(c_compiler, *other_compilers)
    def default_cxx_compilers(c_compiler, *other_compilers):
        if other_compilers:
            other_c_compiler, other_cxx_compiler = other_compilers
            if other_c_compiler.compiler == c_compiler.compiler:
                return (other_cxx_compiler.compiler,)

        dir = os.path.dirname(c_compiler.compiler)
        file = os.path.basename(c_compiler.compiler)

        if c_compiler.type == 'gcc':
            return (os.path.join(dir, file.replace('gcc', 'g++')),)

        if c_compiler.type == 'clang':
            return (os.path.join(dir, file.replace('clang', 'clang++')),)

        return (c_compiler.compiler,)

    return default_cxx_compilers

# NOTE(review): this template is visibly truncated — the docstring is never
# closed (making the following lines lexically part of it), a @depends
# decoration on the inner function and the namespace(...) keyword arguments
# are missing, and a @template decorator appears to be absent.  Only this
# comment was added; confirm the body against upstream.
def provided_program(env_var):
    '''Template handling cases where a program can be specified either as a
    path or as a path with applicable arguments.

    @imports(_from='itertools', _import='takewhile')
    @imports(_from='mozbuild.shellutil', _import='split', _as='shell_split')
    def provided(cmd):
        # Assume the first dash-prefixed item (and any subsequent items) are
        # command-line options, the item before the dash-prefixed item is
        # the program we're looking for, and anything before that is a wrapper
        # of some kind (e.g. sccache).
        cmd = shell_split(cmd[0])

        without_flags = list(takewhile(lambda x: not x.startswith('-'), cmd))

        return namespace(

    return provided

def prepare_flags(host_or_target, macos_sdk):
    """Extra flags needed before running compiler checks.

    When targeting macOS with an explicit SDK, compilation tests must
    point the compiler at that SDK via -isysroot; otherwise no extra
    flags are required.
    """
    needs_sysroot = macos_sdk and host_or_target.os == 'OSX'
    return ['-isysroot', macos_sdk] if needs_sysroot else []

def compiler(language, host_or_target, c_compiler=None, other_compiler=None,
    '''Template handling the generic base checks for the compiler for the
    given `language` on the given platform (`host_or_target`).
    `host_or_target` is either `host` or `target` (the @depends functions
    from init.configure.
    When the language is 'C++', `c_compiler` is the result of the `compiler`
    template for the language 'C' for the same `host_or_target`.
    When `host_or_target` is `host`, `other_compiler` is the result of the
    `compiler` template for the same `language` for `target`.
    When `host_or_target` is `host` and the language is 'C++',
    `other_c_compiler` is the result of the `compiler` template for the
    language 'C' for `target`.
    '''
    # NOTE(review): many lines of this template appear truncated in this view
    # (the signature's closing paren, dict/list closings, try: statements,
    # and several call argument lists); confirm against the upstream file.
    # Sanity-check the accepted argument combinations (see docstring).
    assert host_or_target in {host, target}
    assert language in ('C', 'C++')
    assert language == 'C' or c_compiler is not None
    assert host_or_target is target or other_compiler is not None
    assert language == 'C' or host_or_target is target or \
        other_c_compiler is not None

    host_or_target_str = {
        host: 'host',
        target: 'target',

    # Environment variable carrying the user-provided compiler for this
    # language/platform pair.
    var = {
        ('C', target): 'CC',
        ('C++', target): 'CXX',
        ('C', host): 'HOST_CC',
        ('C++', host): 'HOST_CXX',
    }[language, host_or_target]

    default_compilers = {
        'C': lambda: default_c_compilers(host_or_target, other_compiler),
        'C++': lambda: default_cxx_compilers(c_compiler, other_c_compiler, other_compiler),

    what = 'the %s %s compiler' % (host_or_target_str, language)

    option(env=var, nargs=1, help='Path to %s' % what)

    # Handle the compiler given by the user through one of the CC/CXX/HOST_CC/
    # HOST_CXX variables.
    provided_compiler = provided_program(var)

    search_path = {
        host: host_toolchain_search_path,
        target: toolchain_search_path,

    # Normally, we'd use `var` instead of `_var`, but the interaction with
    # old-configure complicates things, and for now, we a) can't take the plain
    # result from check_prog as CC/CXX/HOST_CC/HOST_CXX and b) have to let
    # old-configure AC_SUBST it (because it's autoconf doing it, not us)
    compiler = check_prog('_%s' % var, what=what, progs=default_compilers,

    # Validate the found compiler: run it, and cross-check the target
    # cpu/kernel/endianness it reports against the configured host/target.
    @depends(compiler, provided_compiler, compiler_wrapper, host_or_target, macos_sdk)
    @checking('whether %s can be used' % what, lambda x: bool(x))
    @imports(_from='mozbuild.shellutil', _import='quote')
    def valid_compiler(compiler, provided_compiler, compiler_wrapper,
                       host_or_target, macos_sdk):
        wrapper = list(compiler_wrapper or ())
        if provided_compiler:
            provided_wrapper = list(provided_compiler.wrapper)
            # When doing a subconfigure, the compiler is set by old-configure
            # and it contains the wrappers from --with-compiler-wrapper and
            # --with-ccache.
            if provided_wrapper[:len(wrapper)] == wrapper:
                provided_wrapper = provided_wrapper[len(wrapper):]
            flags = provided_compiler.flags
            flags = []

        if not flags:
            flags = prepare_flags(host_or_target, macos_sdk)

        info = check_compiler(wrapper + [compiler] + flags, language,

        # Check that the additional flags we got are enough to not require any
        # more flags. If we get an exception, just ignore it; it's liable to be
        # invalid command-line flags, which means the compiler we're checking
        # doesn't support those command-line flags and will fail one or more of
        # the checks below.
            if info.flags:
                flags += info.flags
                info = check_compiler(wrapper + [compiler] + flags, language,
        except FatalCheckError:

        if not info.target_cpu or info.target_cpu != host_or_target.cpu:
            raise FatalCheckError(
                '%s %s compiler target CPU (%s) does not match --%s CPU (%s)'
                % (host_or_target_str.capitalize(), language,
                   info.target_cpu or 'unknown', host_or_target_str,

        if not info.target_kernel or (info.target_kernel !=
            raise FatalCheckError(
                '%s %s compiler target kernel (%s) does not match --%s kernel (%s)'
                % (host_or_target_str.capitalize(), language,
                   info.target_kernel or 'unknown', host_or_target_str,

        if not info.target_endianness or (info.target_endianness !=
            raise FatalCheckError(
                '%s %s compiler target endianness (%s) does not match --%s '
                'endianness (%s)'
                % (host_or_target_str.capitalize(), language,
                   info.target_endianness or 'unknown', host_or_target_str,

        # Compiler version checks
        # ===================================================
        # Check the compiler version here instead of in `compiler_version` so
        # that the `checking` message doesn't pretend the compiler can be used
        # to then bail out one line later.
        if info.type == 'gcc':
            if host_or_target.os == 'Android':
                raise FatalCheckError('GCC is not supported on Android.\n'
                                      'Please use clang from the Android NDK instead.')
            if info.version < '6.1.0':
                raise FatalCheckError(
                    'Only GCC 6.1 or newer is supported (found version %s).'
                    % info.version)

        if info.type == 'clang-cl':
            if info.version < '8.0.0':
                raise FatalCheckError(
                    'Only clang-cl 8.0 or newer is supported (found version %s)'
                    % info.version)

        # If you want to bump the version check here search for
        # diagnose_if above, and see the associated comment.
        if info.type == 'clang' and not info.version:
            if host_or_target.os == 'OSX':
                raise FatalCheckError(
                    'Only clang/llvm 5.0 or newer is supported.')
            raise FatalCheckError(
                'Only clang/llvm 4.0 or newer is supported.')

        if info.flags:
            raise FatalCheckError(
                'Unknown compiler or compiler not supported.')

        return namespace(

    @checking('%s version' % what)
    def compiler_version(compiler):
        return compiler.version

    # For C++, additionally require the C++ compiler to be the same suite
    # and version as the matching C compiler.
    if language == 'C++':
        @depends(valid_compiler, c_compiler)
        def valid_compiler(compiler, c_compiler):
            if compiler.type != c_compiler.type:
                die('The %s C compiler is %s, while the %s C++ compiler is '
                    '%s. Need to use the same compiler suite.',
                    host_or_target_str, c_compiler.type,
                    host_or_target_str, compiler.type)

            if compiler.version != c_compiler.version:
                die('The %s C compiler is version %s, while the %s C++ '
                    'compiler is version %s. Need to use the same compiler '
                    host_or_target_str, c_compiler.version,
                    host_or_target_str, compiler.version)
            return compiler

    # Set CC/CXX/HOST_CC/HOST_CXX for old-configure, which needs the wrapper
    # and the flags that were part of the user input for those variables to
    # be provided.
    add_old_configure_assignment(var, depends_if(valid_compiler)(
        lambda x: list(x.wrapper) + [x.compiler] + list(x.flags)))

    # Pre-seed the autoconf cache variables so old-configure skips its own
    # compiler checks for the target compilers.
    if host_or_target is target:
        add_old_configure_assignment('ac_cv_prog_%s' % var, depends_if(valid_compiler)(
            lambda x: list(x.wrapper) + [x.compiler] + list(x.flags)))
        # We check that it works in python configure already.
        add_old_configure_assignment('ac_cv_prog_%s_works' % var.lower(), 'yes')
            'ac_cv_prog_%s_cross' % var.lower(),
            depends(cross_compiling)(lambda x: 'yes' if x else 'no'))
        gcc_like = depends(valid_compiler.type)(lambda x: 'yes' if x in ('gcc', 'clang') else 'no')
        add_old_configure_assignment('ac_cv_prog_%s_g' % var.lower(), gcc_like)
        if language == 'C':
            add_old_configure_assignment('ac_cv_prog_gcc', gcc_like)
        if language == 'C++':
            add_old_configure_assignment('ac_cv_prog_gxx', gcc_like)

    # old-configure to do some of its still existing checks.
    if language == 'C':
            '%s_TYPE' % var, valid_compiler.type)
            '%s_TYPE' % var, valid_compiler.type)
            '%s_VERSION' % var, depends(valid_compiler.version)(lambda v: str(v)))

    valid_compiler = compiler_class(valid_compiler, host_or_target)

    def compiler_error():
        raise FatalCheckError('Failed compiling a simple %s source with %s'
                              % (language, what))

    valid_compiler.try_compile(check_msg='%s works' % what,

    set_config('%s_BASE_FLAGS' % var, valid_compiler.flags)

    # Set CPP/CXXCPP for both the build system and old-configure. We don't
    # need to check this works for preprocessing, because we already relied
    # on $CC -E/$CXX -E doing preprocessing work to validate the compiler
    # in the first place.
    if host_or_target is target:
        pp_var = {
            'C': 'CPP',
            'C++': 'CXXCPP',

        preprocessor = depends_if(valid_compiler)(
            lambda x: list(x.wrapper) + [x.compiler, '-E'] + list(x.flags))

        set_config(pp_var, preprocessor)
        add_old_configure_assignment(pp_var, preprocessor)

    # LD/HOST_LD are accepted but unused; warn rather than silently ignore.
    if language == 'C':
        linker_var = {
            target: 'LD',
            host: 'HOST_LD',

        @deprecated_option(env=linker_var, nargs=1)
        def linker(value):
            if value:
                return value[0]

        def unused_linker(linker):
            if linker:
                log.warning('The value of %s is not used by this build system.'
                            % linker_var)

    return valid_compiler

# Instantiate the compiler template for the four compilers the build uses:
# target C/C++ and host C/C++.
c_compiler = compiler('C', target)
cxx_compiler = compiler('C++', target, c_compiler=c_compiler)
host_c_compiler = compiler('C', host, other_compiler=c_compiler)
# NOTE(review): the call below appears truncated in this view (missing its
# remaining arguments and closing parenthesis).
host_cxx_compiler = compiler('C++', host, c_compiler=host_c_compiler,

# Generic compiler-based conditions.
building_with_gcc = depends(c_compiler)(lambda info: info.type == 'gcc')

@depends(cxx_compiler, ccache_prefix)
def cxx_is_icecream(info, ccache_prefix):
    """Detect whether C++ compilation actually goes through icecream,
    either via a compiler symlink to icecc or via a ccache prefix."""
    compiler_path = info.compiler
    if os.path.islink(compiler_path):
        link_target = os.readlink(compiler_path)
        if os.path.basename(link_target) == 'icecc':
            return True
    if ccache_prefix and os.path.basename(ccache_prefix) == 'icecc':
        return True

set_config('CXX_IS_ICECREAM', cxx_is_icecream)

def msvs_version(info):
    """Return the Visual Studio version GYP should assume on Windows.

    clang-cl emulates the same version scheme as cl, and MSVS_VERSION
    needs to be set for GYP; any other compiler gets an empty value.
    """
    return '2017' if info.type == 'clang-cl' else ''

set_config('MSVS_VERSION', msvs_version)

# ARM-specific compiler checks only apply when targeting an arm CPU.
include('arm.configure', when=depends(target.cpu)(lambda cpu: cpu == 'arm'))

         cxx_compiler.try_run(header='#include_next <inttypes.h>'))
# Verify the macOS 10.14 SDK headers are installed: a compile of
# `#include_next <inttypes.h>` (`value` above) must succeed when both host
# and target are Darwin 18+.
# NOTE(review): the @depends(...) opening of the decorator and the early
# `return` for the passing case appear truncated in this view.
def check_have_mac_10_14_sdk(host, version, target, value):
    # Only an issue on Mac OS X 10.14 (and probably above).
    if host.kernel != 'Darwin' or target.kernel !='Darwin' or version < '18' or value:

    die('System inttypes.h not found. Please try running '
         '`open /Library/Developer/CommandLineTools/Packages/macOS_SDK_headers_for_macOS_10.14.pkg` '
         'and following the instructions to install the necessary headers')

         try_compile(body='static_assert(sizeof(void *) == 8, "")',
                     check_msg='for 64-bit OS'))
# Cross-check the bitness configure derived for the target against what the
# target compiler actually reports via the static_assert probe above.
# NOTE(review): the @depends(...) opening of the decorator appears truncated
# in this view.
def check_have_64_bit(have_64_bit, compiler_have_64_bit):
    if have_64_bit != compiler_have_64_bit:
        configure_error('The target compiler does not agree with configure '
                        'about the target bitness.')

@depends(c_compiler, target)
def default_debug_flags(compiler_info, target):
    """Pick the default debug-info flag(s) for the detected compiler."""
    # Debug info is ON by default.
    compiler_type = compiler_info.type
    if compiler_type == 'clang-cl':
        return '-Z7'
    if compiler_type == 'clang' and target.kernel == 'WINNT':
        return '-g -gcodeview'
    return '-g'

       help='Debug compiler flags')
# NOTE(review): the option()/imply_option() declarations for MOZ_DEBUG_FLAGS
# and --enable-debug-symbols appear truncated in this view; only their help
# strings and implied-value lambdas remain.

             depends_if('--enable-debug')(lambda v: v))

          help='Disable debug symbols using the given compiler flags')

           depends_if('--enable-debug-symbols')(lambda _: True))

@depends('MOZ_DEBUG_FLAGS', '--enable-debug-symbols', default_debug_flags)
def debug_flags(env_debug_flags, enable_debug_flags, default_debug_flags):
    """Resolve the debug flags to use, in decreasing precedence:
    a value given to --enable-debug-symbols (possibly implied by
    --enable-debug), then the MOZ_DEBUG_FLAGS environment variable,
    then the compiler-specific default."""
    if len(enable_debug_flags):
        return enable_debug_flags[0]
    return env_debug_flags[0] if env_debug_flags else default_debug_flags

# Export the resolved debug flags to the build system and old-configure.
set_config('MOZ_DEBUG_FLAGS', debug_flags)
add_old_configure_assignment('MOZ_DEBUG_FLAGS', debug_flags)

def color_cflags(info):
    """Return the flag enabling colored diagnostics for the detected
    compiler type, or an empty string when none applies."""
    # We could test compiling with flags. But why incur the overhead when
    # color support should always be present in a specific toolchain
    # version?

    # Code for auto-adding this flag to compiler invocations needs to
    # determine if an existing flag isn't already present. That is likely
    # using exact string matching on the returned value. So if the return
    # value changes to e.g. "<x>=always", exact string match may fail and
    # multiple color flags could be added. So examine downstream consumers
    # before adding flags to return values.
    if info.type == 'gcc':
        return '-fdiagnostics-color'
    elif info.type == 'clang':
        return '-fcolor-diagnostics'
        # NOTE(review): the `else:` introducing the empty-string fallback
        # below appears to be missing from this view.
        return ''

set_config('COLOR_CFLAGS', color_cflags)

# Some standard library headers (notably bionic on Android) declare standard
# functions (e.g. getchar()) and also #define macros for those standard
# functions.  libc++ deals with this by doing something like the following
# (explanatory comments added):
#   #ifdef FUNC
#   // Capture the definition of FUNC.
#   inline _LIBCPP_INLINE_VISIBILITY int __libcpp_FUNC(...) { return FUNC(...); }
#   #undef FUNC
#   // Use a real inline definition.
#   inline _LIBCPP_INLINE_VISIBILITY int FUNC(...) { return __libcpp_FUNC(...); }
#   #endif
# _LIBCPP_INLINE_VISIBILITY is typically defined as:
#   __attribute__((__visibility__("hidden"), __always_inline__))
# Unfortunately, this interacts badly with our system header wrappers, as the:
#   #pragma GCC visibility push(default)
# that they do prior to including the actual system header is treated by the
# compiler as an explicit declaration of visibility on every function declared
# in the header.  Therefore, when the libc++ code above is encountered, it is
# as though the compiler has effectively seen:
#   int FUNC(...) __attribute__((__visibility__("default")));
#   int FUNC(...) __attribute__((__visibility__("hidden")));
# and the compiler complains about the mismatched visibility declarations.
# However, libc++ will only define _LIBCPP_INLINE_VISIBILITY if there is no
# existing definition.  We can therefore define it to the empty string (since
# we are properly managing visibility ourselves) and avoid this whole mess.
# Note that we don't need to do this with gcc, as libc++ detects gcc and
# effectively does the same thing we are doing here.
# _LIBCPP_ALWAYS_INLINE needs similar workarounds, since it too declares
# hidden visibility.

@depends(c_compiler, target)
def libcxx_override_visibility(c_compiler, target):
    """On Android with clang, neutralize libc++'s visibility-related
    macros (see the explanation in the comment block above) by defining
    them to the empty string."""
    android_clang = c_compiler.type == 'clang' and target.os == 'Android'
    if android_clang:
        return ''

set_define('_LIBCPP_INLINE_VISIBILITY', libcxx_override_visibility)
set_define('_LIBCPP_ALWAYS_INLINE', libcxx_override_visibility)
set_define('_LIBCPP_ALWAYS_INLINE_EXCEPT_GCC49', libcxx_override_visibility)

@depends(target, check_build_environment)
def visibility_flags(target, env):
    # Symbol-visibility handling for non-Windows targets: Darwin uses
    # -fvisibility compiler flags; everywhere else the system header
    # wrappers under dist/system_wrappers are used.
    if target.os != 'WINNT':
        if target.kernel == 'Darwin':
            return ('-fvisibility=hidden', '-fvisibility-inlines-hidden')
        # NOTE(review): this tuple looks truncated in this view — the flags
        # that normally precede/accompany these include entries (e.g. an
        # -include for gcc_hidden.h) appear to be missing.
        return ('-I%s/system_wrappers' % os.path.join(env.dist),
                '%s/config/gcc_hidden.h' % env.topsrcdir)

@depends(target, visibility_flags)
def wrap_system_includes(target, visibility_flags):
    """System header wrapping is needed whenever visibility flags are in
    effect, except on Darwin (which uses -fvisibility instead)."""
    return True if visibility_flags and target.kernel != 'Darwin' else None

           depends(visibility_flags)(lambda v: bool(v) or None))
           depends(visibility_flags)(lambda v: bool(v) or None))
# NOTE(review): the two lines above are the trailing arguments of truncated
# set_define() calls for the hidden-visibility defines.
set_config('WRAP_SYSTEM_INCLUDES', wrap_system_includes)
set_config('VISIBILITY_FLAGS', visibility_flags)

def depend_cflags(host_or_target_c_compiler):
    """Template returning the compiler flags used to generate make
    dependency (.pp) files for the given host or target C compiler."""
    # NOTE(review): the @template decorator, the inner @depends decorator,
    # the `else:` for the clang-cl branch and the closing bracket of its
    # list appear truncated in this view.
    def depend_cflags(host_or_target_c_compiler):
        if host_or_target_c_compiler.type != 'clang-cl':
            return ['-MD', '-MP', '-MF $(MDDEPDIR)/$(@F).pp']
            # clang-cl doesn't accept the normal -MD -MP -MF options that clang
            # does, but the underlying cc1 binary understands how to generate
            # dependency files.  These options are based on analyzing what the
            # normal clang driver sends to cc1 when given the "correct"
            # dependency options.
            return [
                '-Xclang', '-MP',
                '-Xclang', '-dependency-file',
                '-Xclang', '$(MDDEPDIR)/$(@F).pp',
                '-Xclang', '-MT',
                '-Xclang', '$@'

    return depend_cflags

set_config('_DEPEND_CFLAGS', depend_cflags(c_compiler))
set_config('_HOST_DEPEND_CFLAGS', depend_cflags(host_c_compiler))

def preprocess_option(compiler):
    # Return the option used to run the preprocessor and name its output
    # file, per compiler family.
    # The uses of PREPROCESS_OPTION depend on the spacing for -o/-Fi.
    if compiler.type in ('gcc', 'clang'):
        return '-E -o '
        # NOTE(review): the `else:` for the clang-cl (-P -Fi) branch appears
        # to be missing from this view.
        return '-P -Fi'

set_config('PREPROCESS_OPTION', preprocess_option)

# We only want to include windows.configure when we are compiling on
# Windows, for Windows.

@depends(target, host)
def is_windows(target, host):
    """True only when both the build host and the target are Windows."""
    return all(o.kernel == 'WINNT' for o in (host, target))

include('windows.configure', when=is_windows)

# On Power ISA, determine compiler flags for VMX, VSX and VSX-3.

# NOTE(review): the option declarations these `when=` clauses belong to
# appear truncated in this view.
           when=depends(target.cpu)(lambda cpu: cpu.startswith('ppc')))

           when=depends(target.cpu)(lambda cpu: cpu.startswith('ppc')))

           when=depends(target.cpu)(lambda cpu: cpu.startswith('ppc')))

# ==============================================================

js_option('--enable-address-sanitizer', help='Enable Address Sanitizer')

# NOTE(review): the @depends decorators tying the sanitizer functions below
# to their options (and any related imply_option lines) appear truncated in
# this view.
def asan():
    return True

add_old_configure_assignment('MOZ_ASAN', asan)

# ==============================================================

js_option('--enable-memory-sanitizer', help='Enable Memory Sanitizer')

def msan():
    return True

add_old_configure_assignment('MOZ_MSAN', msan)

# ==============================================================

js_option('--enable-thread-sanitizer', help='Enable Thread Sanitizer')

def tsan():
    return True

add_old_configure_assignment('MOZ_TSAN', tsan)

# ==============================================================

          help='Enable UndefinedBehavior Sanitizer')

# Build the comma-separated list of UBSan checks, falling back to a default
# set when none are given explicitly.
# NOTE(review): the option declarations and the contents of the default
# check list appear truncated in this view.
def ubsan(options):
    default_checks = [

    checks = options if len(options) else default_checks

    return ','.join(checks)

add_old_configure_assignment('MOZ_UBSAN_CHECKS', ubsan)

          help='Enable UndefinedBehavior Sanitizer (Signed Integer Overflow Parts)')

def ub_signed_overflow_san():
    return True

add_old_configure_assignment('MOZ_SIGNED_OVERFLOW_SANITIZE', ub_signed_overflow_san)

          help='Enable UndefinedBehavior Sanitizer (Unsigned Integer Overflow Parts)')

def ub_unsigned_overflow_san():
    return True

add_old_configure_assignment('MOZ_UNSIGNED_OVERFLOW_SANITIZE', ub_unsigned_overflow_san)

# Security Hardening
# ==============================================================

# Master switch for the hardening flags computed below.
option('--enable-hardening', env='MOZ_SECURITY_HARDENING',
       help='Enables security hardening compiler options')

# This function is a bit confusing. It adds or removes hardening flags in
# three situations: if --enable-hardening is passed; if --disable-hardening
# is passed, and if no flag is passed.
# At time of this comment writing, all flags are actually added in the
# default no-flag case; making --enable-hardening the same as omitting the
# flag. --disable-hardening will omit the security flags. (However, not all
# possible security flags will be omitted by --disable-hardening, as many are
# compiler-default options we do not explicitly enable.)
@depends('--enable-hardening', '--enable-address-sanitizer',
         '--enable-optimize', c_compiler, target)
def security_hardening_cflags(hardening_flag, asan, optimize, c_compiler, target):
    # Compute the hardening-related compiler/linker flag lists for the
    # top-level build and for js/ (see the explanatory comment above).
    # NOTE(review): the flag-appending statements inside the branches below,
    # and the namespace() fields at the end, appear truncated in this view.
    compiler_is_gccish = c_compiler.type in ('gcc', 'clang')

    flags = []
    ldflags = []
    js_flags = []
    js_ldflags = []

    # ----------------------------------------------------------
    # If hardening is explicitly enabled, or not explicitly disabled
    if hardening_flag.origin == "default" or hardening_flag:
        # FORTIFY_SOURCE ------------------------------------
        # Require optimization for FORTIFY_SOURCE. See Bug 1417452
        # Also, undefine it before defining it just in case a distro adds it, see Bug 1418398
        if compiler_is_gccish and optimize and not asan:
            # Don't enable FORTIFY_SOURCE on Android on the top-level, but do enable in js/
            if target.os != 'Android':

        # fstack-protector ------------------------------------
        # Enable only if hardening is not disabled and ASAN is
        # not on as ASAN will catch the crashes for us
        # mingw-clang cross-compile toolchain has bugs with stack protector
        mingw_clang = c_compiler.type == 'clang' and target.os == 'WINNT'
        if compiler_is_gccish and not asan:
            if not mingw_clang:

        # ASLR ------------------------------------------------
        # ASLR (dynamicbase) is enabled by default in clang-cl; but the
        # mingw-clang build requires it to be explicitly enabled
        if mingw_clang:

        # Control Flow Guard (CFG) ----------------------------
        # On aarch64, this is enabled only with explicit --enable-hardening
        # (roughly: automation) due to a dependency on a patched clang-cl.
        if c_compiler.type == 'clang-cl' and c_compiler.version >= '8' and \
           (target.cpu != 'aarch64' or hardening_flag):
            # nolongjmp is needed because clang doesn't emit the CFG tables of
            # setjmp return addresses

    # ----------------------------------------------------------
    # If ASAN _is_ on, undefine FORTIFY_SOURCE just to be safe
    if asan:

    # fno-common -----------------------------------------
    # Do not merge variables for ASAN; can detect some subtle bugs
    if asan:
        # clang-cl does not recognize the flag, it must be passed down to clang
        if c_compiler.type == 'clang-cl':

    return namespace(

add_old_configure_assignment('MOZ_HARDENING_CFLAGS', security_hardening_cflags.flags)
add_old_configure_assignment('MOZ_HARDENING_LDFLAGS', security_hardening_cflags.ldflags)
add_old_configure_assignment('MOZ_HARDENING_CFLAGS_JS', security_hardening_cflags.js_flags)
add_old_configure_assignment('MOZ_HARDENING_LDFLAGS_JS', security_hardening_cflags.js_ldflags)

# Frame pointers
# ==============================================================
def frame_pointer_flags(compiler):
    # Per-compiler-type enable/disable flag sets for frame pointers.
    # NOTE(review): the @template decorator and the fields of the first
    # namespace() (the clang-cl branch) appear truncated in this view.
    if compiler.type == 'clang-cl':
        return namespace(
    return namespace(
        enable=['-fno-omit-frame-pointer', '-funwind-tables'],
        disable=['-fomit-frame-pointer', '-funwind-tables'],

@depends(moz_optimize.optimize, moz_debug, target,
         '--enable-memory-sanitizer', '--enable-address-sanitizer',
# Keep frame pointers whenever they help debugging, sanitizer stack traces,
# or stack walking on Windows x86/aarch64.
# NOTE(review): the decorator above appears truncated (the ubsan dependency
# and closing parenthesis are missing from this view).
def frame_pointer_default(optimize, debug, target, msan, asan, ubsan):
    return bool(not optimize or debug or msan or asan or ubsan or \
        (target.os == 'WINNT' and target.cpu in ('x86', 'aarch64')))

js_option('--enable-frame-pointers', default=frame_pointer_default,
          help='{Enable|Disable} frame pointers')

@depends('--enable-frame-pointers', frame_pointer_flags)
def frame_pointer_flags(enable, flags):
    """Pick the enable or disable flag set based on the option value."""
    return flags.enable if enable else flags.disable

set_config('MOZ_FRAMEPTR_FLAGS', frame_pointer_flags)

# nasm detection
# ==============================================================
nasm = check_prog('NASM', ['nasm'], allow_missing=True, paths=toolchain_search_path)

@checking('nasm version')
def nasm_version(nasm):
    """Run `nasm -v` and extract the version; None when the binary exits
    non-zero (as the mac stub binary does)."""
    (retcode, stdout, _) = get_cmd_output(nasm, '-v')
    if retcode:
        # mac stub binary
        return None

    banner = stdout.splitlines()[0]
    return Version(banner.split()[2])

def nasm_major_version(nasm_version):
    """Major component of the detected nasm version, as a string."""
    major = nasm_version.major
    return str(major)

def nasm_minor_version(nasm_version):
    """Minor component of the detected nasm version, as a string."""
    minor = nasm_version.minor
    return str(minor)

# Expose the nasm version components to the build.
set_config('NASM_MAJOR_VERSION', nasm_major_version)
set_config('NASM_MINOR_VERSION', nasm_minor_version)

@depends(nasm, target)
def nasm_asflags(nasm, target):
    """Compute the object-format flags for nasm on this target, or None
    when nasm is not available."""
    if not nasm:
        return None

    known_formats = {
        ('OSX', 'x86'): ['-f', 'macho32'],
        ('OSX', 'x86_64'): ['-f', 'macho64'],
        ('WINNT', 'x86'): ['-f', 'win32'],
        ('WINNT', 'x86_64'): ['-f', 'win64'],
    }
    asflags = known_formats.get((target.os, target.cpu), None)
    if asflags is None:
        # We're assuming every x86 platform we support that's
        # not Windows or Mac is ELF.
        elf_format = {'x86': 'elf32', 'x86_64': 'elf64'}.get(target.cpu)
        if elf_format is not None:
            asflags = ['-f', elf_format]
    return asflags

set_config('NASM_ASFLAGS', nasm_asflags)

def have_nasm(value):
    """Expose whether a nasm binary was located (True, or None when not)."""
    return True if value else None

def have_yasm(yasm_asflags):
    """Expose whether yasm is usable, based on its computed flags."""
    return True if yasm_asflags else None

set_config('HAVE_NASM', have_nasm)

set_config('HAVE_YASM', have_yasm)
# Keep assigning YASM for old-configure until the variable is no longer
# necessary there.
add_old_configure_assignment('YASM', have_yasm)

# clang-cl integrated assembler support
# ==============================================================
def clangcl_asflags(target):
    """Assembler flags for clang-cl's integrated assembler.

    aarch64-windows needs an explicit --target; no flags apply elsewhere.
    """
    if target.os == 'WINNT' and target.cpu == 'aarch64':
        return ['--target=aarch64-windows-msvc']
    return None

set_config('CLANGCL_ASFLAGS', clangcl_asflags)

# Code Coverage
# ==============================================================

# Coverage can be requested via the option or the MOZ_CODE_COVERAGE variable.
js_option('--enable-coverage', env='MOZ_CODE_COVERAGE',
          help='Enable code coverage')

def code_coverage(value):
    """True when code coverage was requested, None otherwise."""
    return True if value else None

# Coverage is exposed both as a build config value and as a C define.
set_config('MOZ_CODE_COVERAGE', code_coverage)
set_define('MOZ_CODE_COVERAGE', code_coverage)

@depends(target, c_compiler, vc_path, check_build_environment, when=code_coverage)
@imports(_from='__builtin__', _import='open')
def coverage_cflags(target, c_compiler, vc_path, build_env):
    # Assemble the coverage compilation flags and stash them in a response
    # file under the objdir, so the (long) flags don't bloat command lines.
    # NOTE(review): the closing brackets of the two `cflags += [` lists
    # below appear truncated in this view.
    cflags = ['--coverage']

    if c_compiler.type in ('clang', 'clang-cl'):
        cflags += [
            '-Xclang', '-coverage-no-function-names-in-data',

    if target.os == 'WINNT' and c_compiler.type == 'clang-cl':
        # The Visual Studio directory is the parent of the Visual C++ directory.
        vs_path = os.path.dirname(vc_path)

        # We need to get the real path of Visual Studio, which can be in a
        # symlinked directory (for example, on automation).
        vs_path = mozpack.path.readlink(vs_path)
        # Since the -fprofile-exclude-files option in LLVM is a regex, we need to
        # have the same path separators.
        vs_path = vs_path.replace('/', '\\')

        cflags += [

    response_file_path = os.path.join(build_env.topobjdir, 'code_coverage_cflags')

    with open(response_file_path, 'w') as f:
        f.write(' '.join(cflags))

    return ['@{}'.format(response_file_path)]

set_config('COVERAGE_CFLAGS', coverage_cflags)

# ==============================================================

# NOTE(review): the RUSTFLAGS/RUSTC_OPT_LEVEL option declarations appear
# truncated in this view; only their help strings remain.
       help='Rust compiler flags')
set_config('RUSTFLAGS', depends('RUSTFLAGS')(lambda flags: flags))

# Rust compiler flags
# ==============================================================

          help='Rust compiler optimization level (-C opt-level=%s)')

# --enable-release kicks in full optimizations.
imply_option('RUSTC_OPT_LEVEL', '2', when='--enable-release')

@depends('RUSTC_OPT_LEVEL', moz_optimize)
def rustc_opt_level(opt_level_option, moz_optimize):
    # An explicit RUSTC_OPT_LEVEL wins; otherwise derive from MOZ_OPTIMIZE.
    if opt_level_option:
        return opt_level_option[0]
        # NOTE(review): the `else:` introducing the fallback below appears
        # to be missing from this view.
        return '1' if moz_optimize.optimize else '0'

@depends(rustc_opt_level, debug_rust, '--enable-debug-symbols', '--enable-frame-pointers')
def rust_compile_flags(opt_level, debug_rust, debug_symbols, frame_pointers):
    """Derive the rustc -C codegen flags from the configure options."""
    # Cargo currently supports only two interesting profiles for building:
    # development and release. Those map (roughly) to --enable-debug and
    # --disable-debug in Gecko, respectively.
    # But we'd also like to support an additional axis of control for
    # optimization level. Since Cargo only supports 2 profiles, we're in
    # a bit of a bind.
    # Code here derives various compiler options given other configure options.
    # The options defined here effectively override defaults specified in
    # Cargo.toml files.

    debug_assertions = None
    debug_info = None

    # opt-level=0 implies -C debug-assertions, which may not be desired
    # unless Rust debugging is enabled.
    if opt_level == '0' and not debug_rust:
        debug_assertions = False

    if debug_symbols:
        debug_info = '2'

    opts = []

    if opt_level is not None:
        opts.append('opt-level=%s' % opt_level)
    if debug_assertions is not None:
        opts.append('debug-assertions=%s' %
                    ('yes' if debug_assertions else 'no'))
    if debug_info is not None:
        opts.append('debuginfo=%s' % debug_info)
    if frame_pointers:
    # NOTE(review): the opts.append(...) for the frame-pointers case appears
    # to be missing from this view.

    # Each option becomes a `-C <opt>` pair on the rustc command line.
    flags = []
    for opt in opts:
        flags.extend(['-C', opt])

    return flags

# Rust incremental compilation
# ==============================================================

          help='Disable incremental rust compilation.')

# NOTE(review): the option declaration above, the tail of the @depends
# decorator and the def signature below, and two early `return` statements
# appear truncated in this view.
@depends(rustc_opt_level, debug_rust, 'MOZ_AUTOMATION', code_coverage,
def cargo_incremental(opt_level, debug_rust, automation, code_coverage,
    """Return a value for the CARGO_INCREMENTAL environment variable."""

    if not enabled:
        return '0'

    # We never want to use incremental compilation in automation.  sccache
    # handles our automation use case much better than incremental compilation
    # would.
    if automation:
        return '0'

    # Coverage instrumentation doesn't play well with incremental compilation
    if code_coverage:
        return '0'

    # Incremental compilation is automatically turned on for debug builds, so
    # we don't need to do anything special here.
    if debug_rust:

    # --enable-release automatically sets -O2 for Rust code, and people can
    # set RUSTC_OPT_LEVEL to 2 or even 3 if they want to profile Rust code.
    # Let's assume that if Rust code is using -O2 or higher, we shouldn't
    # be using incremental compilation, because we'd be imposing a
    # significant runtime cost.
    if opt_level not in ('0', '1'):

    # We're clear to use incremental compilation!
    return '1'

set_config('CARGO_INCREMENTAL', cargo_incremental)

# Linker detection
# ==============================================================

def is_linker_option_enabled(target):
    """Linker selection applies everywhere except Windows and Solaris."""
    unsupported_kernels = ('WINNT', 'SunOS')
    if target.kernel in unsupported_kernels:
        return None
    return True

       help='Enable GNU Gold Linker when it is not already the default',
# NOTE(review): the option(...) opening lines of the --enable-gold and
# --enable-linker declarations appear truncated in this view.

imply_option('--enable-linker', 'gold', when='--enable-gold')

js_option('--enable-linker', nargs=1,
          help='Select the linker {bfd, gold, ld64, lld, lld-*}',

@depends('--enable-linker', c_compiler, developer_options, '--enable-gold',
         extra_toolchain_flags, target, when=is_linker_option_enabled)
@checking('for linker', lambda x: x.KIND)
# os.environ is read below; the sandbox requires an explicit import.
@imports('os')
def select_linker(linker, c_compiler, developer_options, enable_gold,
                  toolchain_flags, target):
    """Detect the linker the C compiler will use, honoring --enable-linker.

    Returns namespace(KIND=<'bfd'|'gold'|'lld'|'ld64'|'unknown'>,
    LINKER_FLAG=<-fuse-ld flags, possibly empty>), or dies on failure.
    """

    if linker:
        linker = linker[0]
    else:
        linker = None

    def is_valid_linker(linker):
        if target.kernel == 'Darwin':
            valid_linkers = ('ld64', 'lld')
        else:
            valid_linkers = ('bfd', 'gold', 'lld')
        if linker in valid_linkers:
            return True
        # Versioned lld binaries (lld-7, ...) are acceptable wherever lld is.
        if 'lld' in valid_linkers and linker.startswith('lld-'):
            return True
        return False

    if linker and not is_valid_linker(linker):
        # Check that we are trying to use a supported linker
        die('Unsupported linker ' + linker)

    # Check the kind of linker
    version_check = ['-Wl,--version']
    cmd_base = c_compiler.wrapper + [c_compiler.compiler] + c_compiler.flags

    def try_linker(linker):
        # Generate the compiler flag
        if linker == 'ld64':
            # There is no -fuse-ld=ld64; the system 'ld' is ld64 on Darwin.
            linker_flag = ['-fuse-ld=ld']
        elif linker:
            linker_flag = ["-fuse-ld=" + linker]
        else:
            linker_flag = []
        cmd = cmd_base + linker_flag + version_check
        if toolchain_flags:
            cmd += toolchain_flags

        # ld64 doesn't have anything to print out a version. It does print out
        # "ld64: For information on command line options please use 'man ld'."
        # but that would require doing two attempts, one with --version, that
        # would fail, and another with --help.
        # Instead, abuse its LD_PRINT_OPTIONS feature to detect a message
        # specific to it on stderr when it fails to process --version.
        env = dict(os.environ)
        env['LD_PRINT_OPTIONS'] = '1'
        retcode, stdout, stderr = get_cmd_output(*cmd, env=env)
        cmd_output = stdout.decode('utf-8')
        stderr = stderr.decode('utf-8')
        if retcode == 1 and 'Logging ld64 options' in stderr:
            kind = 'ld64'

        elif retcode != 0:
            return None

        elif 'GNU ld' in cmd_output:
            # We are using the normal linker
            kind = 'bfd'

        elif 'GNU gold' in cmd_output:
            kind = 'gold'

        elif 'LLD' in cmd_output:
            kind = 'lld'

        else:
            kind = 'unknown'

        return namespace(
            KIND=kind,
            LINKER_FLAG=linker_flag,
        )

    result = try_linker(linker)
    if result is None:
        if linker:
            die("Could not use {} as linker".format(linker))
        die("Failed to find a linker")

    if (linker is None and enable_gold.origin == 'default' and
            developer_options and result.KIND in ('bfd', 'gold')):
        # try and use lld if available.
        tried = try_linker('lld')
        if result.KIND != 'gold' and (tried is None or tried.KIND != 'lld'):
            tried = try_linker('gold')
            if tried is None or tried.KIND != 'gold':
                tried = None
        if tried:
            result = tried

    # If an explicit linker was given, error out if what we found is different.
    if linker and not linker.startswith(result.KIND):
        die("Could not use {} as linker".format(linker))

    return result

set_config('LINKER_KIND', select_linker.KIND)

@depends_if(select_linker, macos_sdk)
def linker_ldflags(linker, macos_sdk):
    """Return the LDFLAGS needed to use the selected linker and macOS SDK."""
    flags = list(linker.LINKER_FLAG or [])
    if macos_sdk:
        if linker.KIND == 'ld64':
            # ld64 spells the SDK root as -syslibroot.
            flags.append('-Wl,-syslibroot,%s' % macos_sdk)
        else:
            # GNU-style linkers (bfd/gold/lld) use --sysroot instead.
            flags.append('-Wl,--sysroot=%s' % macos_sdk)

    return flags

add_old_configure_assignment('LINKER_LDFLAGS', linker_ldflags)

# There's a wrinkle with MinGW: linker configuration is not enabled, so
# `select_linker` is never invoked.  Hard-code around it.
@depends(select_linker, target, c_compiler)
def gcc_use_gnu_ld(select_linker, target, c_compiler):
    # When a linker was actually detected, report whether it speaks the
    # GNU ld command-line dialect.
    kind = select_linker.KIND if select_linker is not None else None
    if kind is not None:
        return kind in ('bfd', 'gold', 'lld')
    # MinGW clang: no linker detection ran, but the linker is GNU-compatible.
    if c_compiler.type == 'clang' and target.kernel == 'WINNT':
        return True
    return None

# GCC_USE_GNU_LD=1 means the linker is command line compatible with GNU ld.
set_config('GCC_USE_GNU_LD', gcc_use_gnu_ld)
add_old_configure_assignment('GCC_USE_GNU_LD', gcc_use_gnu_ld)

# Assembler detection
# ==============================================================

js_option(env='AS', nargs=1, help='Path to the assembler')

@depends(target, c_compiler)
def as_info(target, c_compiler):
    """Pick the assembler: a MASM variant for clang-cl, otherwise the C
    compiler itself.  The `type` field drives the masm/gcc handling below."""
    if c_compiler.type == 'clang-cl':
        # ml for x86, ml64 for x86-64, armasm64 for AArch64.
        ml = {
            'x86': 'ml',
            'x86_64': 'ml64',
            'aarch64': 'armasm64.exe',
        }[target.cpu]
        return namespace(
            type='masm',
            names=(ml, )
        )
    # When building with anything but clang-cl, we just use the C compiler as the assembler.
    return namespace(
        type='gcc',
        names=(c_compiler.compiler, )
    )

# One would expect the assembler to be specified merely as a program.  But in
# cases where the assembler is passed down into js/, it can be specified in
# the same way as CC: a program + a list of argument flags.  We might as well
# permit the same behavior in general, even though it seems somewhat unusual.
# So we have to do the same sort of dance as we did above with
# `provided_compiler`.
provided_assembler = provided_program('AS')
# NOTE(review): the closing argument line of this call was lost in this hunk;
# the toolchain search path is the natural lookup location — confirm upstream.
assembler = check_prog('_AS', input=provided_assembler.program,
                       what='the assembler', progs=as_info.names,
                       paths=toolchain_search_path)

@depends(as_info, assembler, provided_assembler, c_compiler)
def as_with_flags(as_info, assembler, provided_assembler, c_compiler):
    """Return the full assembler invocation (wrapper + program + flags)."""
    if provided_assembler:
        # An explicitly provided AS may carry its own wrapper and flags.
        return provided_assembler.wrapper + \
            [provided_assembler.program] + \
            provided_assembler.flags

    # MASM is invoked directly, with no compiler wrapper or flags.
    if as_info.type == 'masm':
        return assembler

    assert as_info.type == 'gcc'

    # Need to add compiler wrappers and flags as appropriate.
    return c_compiler.wrapper + [assembler] + c_compiler.flags

add_old_configure_assignment('AS', as_with_flags)
add_old_configure_assignment('ac_cv_prog_AS', as_with_flags)

@depends(assembler, c_compiler, extra_toolchain_flags)
# subprocess.PIPE is used below; the sandbox requires an explicit import.
@imports('subprocess')
@imports(_from='os', _import='devnull')
def gnu_as(assembler, c_compiler, toolchain_flags):
    """Return True when the assembler is GNU as (or GNU-compatible)."""
    # clang uses a compatible GNU assembler.
    if c_compiler.type == 'clang':
        return True

    if c_compiler.type == 'gcc':
        cmd = [assembler] + c_compiler.flags
        if toolchain_flags:
            cmd += toolchain_flags
        cmd += ['-Wa,--version', '-c', '-o', devnull, '-x', 'assembler', '-']
        # We don't actually have to provide any input on stdin, `Popen.communicate` will
        # close the stdin pipe.
        # clang will error if it uses its integrated assembler for this target,
        # so handle failures gracefully.
        if 'GNU' in check_cmd_output(*cmd, stdin=subprocess.PIPE, onerror=lambda: '').decode('utf-8'):
            return True

set_config('GNU_AS', gnu_as)
add_old_configure_assignment('GNU_AS', gnu_as)

@depends(as_info, target)
def as_dash_c_flag(as_info, target):
    """Return the assemble-without-linking flag ('' when unsupported)."""
    # armasm64 doesn't understand -c.
    if as_info.type == 'masm' and target.cpu == 'aarch64':
        return ''
    else:
        # Fix: this return was unreachable (indented under the branch above).
        return '-c'

set_config('AS_DASH_C_FLAG', as_dash_c_flag)

@depends(as_info, target)
def as_outoption(as_info, target):
    # The uses of ASOUTOPTION depend on the spacing for -o/-Fo.
    # ml/ml64 take -Fo<file> with no space; everything else (including
    # armasm64 on aarch64) takes "-o <file>".
    wants_fo = as_info.type == 'masm' and target.cpu != 'aarch64'
    return '-Fo' if wants_fo else '-o '

set_config('ASOUTOPTION', as_outoption)

# clang plugin handling
# ==============================================================

js_option('--enable-clang-plugin', env='ENABLE_CLANG_PLUGIN',
          help="Enable building with the mozilla clang plugin")

# NOTE(review): the opening lines of the two assignments below were lost in
# this hunk; reconstructed from the continuation indent and the env names
# declared above — confirm against upstream.
add_old_configure_assignment('ENABLE_CLANG_PLUGIN',
                             depends_if('--enable-clang-plugin')(lambda _: True))

js_option('--enable-mozsearch-plugin', env='ENABLE_MOZSEARCH_PLUGIN',
          help="Enable building with the mozsearch indexer plugin")

add_old_configure_assignment('ENABLE_MOZSEARCH_PLUGIN',
                             depends_if('--enable-mozsearch-plugin')(lambda _: True))

# Libstdc++ compatibility hacks
# ==============================================================
js_option('--enable-stdcxx-compat', env='MOZ_STDCXX_COMPAT',
          help='Enable compatibility with older libstdc++')

def libstdcxx_version(var, compiler):
    """Build a configure check recording, in config variable `var`, the
    libstdc++ version required by `compiler` (when --enable-stdcxx-compat)."""
    @depends(compiler, when='--enable-stdcxx-compat')
    @checking(var, lambda v: v and "GLIBCXX_%s" % v.dotted)
    @imports(_from='mozbuild.configure.libstdcxx', _import='find_version')
    @imports(_from='__builtin__', _import='Exception')
    def version(compiler):
        try:
            result = find_version(
                compiler.wrapper + [compiler.compiler] + compiler.flags)
        except Exception:
            die("Couldn't determine libstdc++ version")
        if result:
            # NOTE(review): assumes find_version yields (encoded, dotted) —
            # confirm against mozbuild.configure.libstdcxx.
            return namespace(
                encoded=result[0],
                dotted=result[1],
            )

    set_config(var, version.encoded)
    return version

    # NOTE(review): the opening lines of the two calls below were lost in
    # this hunk.  Each appears to pass '-D_GLIBCXX_USE_CXX11_ABI=0' together
    # with a libstdcxx_version() check, for the target and host C++ compilers
    # respectively — restore the full calls from upstream before use.
    '-D_GLIBCXX_USE_CXX11_ABI=0', cxx_compiler,
        'MOZ_LIBSTDCXX_TARGET_VERSION', cxx_compiler))
    '-D_GLIBCXX_USE_CXX11_ABI=0', host_cxx_compiler,
        'MOZ_LIBSTDCXX_HOST_VERSION', host_cxx_compiler))

# Support various fuzzing options
# ==============================================================
js_option('--enable-fuzzing', help='Enable fuzzing support')

# NOTE(review): the decorators on the four functions below were lost in this
# hunk; they are reconstructed from their parameter lists — confirm the exact
# probes (especially the AFL compiler check) against upstream.
@depends('--enable-fuzzing')
def enable_fuzzing(value):
    if value:
        return True

@depends(try_compile(body='__AFL_COMPILER;',
                     check_msg='for AFL compiler',
                     when='--enable-fuzzing'))
def enable_aflfuzzer(afl):
    if afl:
        return True

@depends(enable_fuzzing, enable_aflfuzzer, c_compiler, target)
def enable_libfuzzer(fuzzing, afl, c_compiler, target):
    # libFuzzer needs clang and is not used with AFL or on Android.
    if fuzzing and not afl and c_compiler.type == 'clang' and target.os != 'Android':
        return True

@depends(enable_fuzzing, enable_aflfuzzer, enable_libfuzzer)
def enable_fuzzing_interfaces(fuzzing, afl, libfuzzer):
    # The in-tree fuzzing interfaces work with either AFL or libFuzzer.
    if fuzzing and (afl or libfuzzer):
        return True

set_config('FUZZING', enable_fuzzing)
set_define('FUZZING', enable_fuzzing)

set_config('LIBFUZZER', enable_libfuzzer)
set_define('LIBFUZZER', enable_libfuzzer)
add_old_configure_assignment('LIBFUZZER', enable_libfuzzer)

set_config('FUZZING_INTERFACES', enable_fuzzing_interfaces)
set_define('FUZZING_INTERFACES', enable_fuzzing_interfaces)
add_old_configure_assignment('FUZZING_INTERFACES', enable_fuzzing_interfaces)

         check_msg='whether the C compiler supports -fsanitize=fuzzer-no-link'))
def libfuzzer_flags(value):
    if value:
        no_link_flag_supported = True
        # recommended for (and only supported by) clang >= 6
        use_flags = ['-fsanitize=fuzzer-no-link']
        no_link_flag_supported = False
        use_flags = ['-fsanitize-coverage=trace-pc-guard,trace-cmp']

    return namespace(

set_config('HAVE_LIBFUZZER_FLAG_FUZZER_NO_LINK', libfuzzer_flags.no_link_flag_supported)
set_config('LIBFUZZER_FLAGS', libfuzzer_flags.use_flags)
add_old_configure_assignment('LIBFUZZER_FLAGS', libfuzzer_flags.use_flags)

# Shared library building
# ==============================================================

# XXX: The use of makefile constructs in these variables is awful.
# NOTE(review): several lines of this function were lost in this hunk: the
# closings of the namespace() calls, two else: branches, and the bulk of the
# MSVC '-DLL' linker command list.  Restore from upstream before relying on
# this block.
@depends(target, c_compiler)
def make_shared_library(target, compiler):
    # Produces namespace(mkshlib=..., mkcshlib=...): make command templates
    # for linking C++ and C shared libraries respectively.
    if target.os == 'WINNT':
        if compiler.type == 'gcc':
            return namespace(
                mkshlib=['$(CXX)', '$(DSO_LDOPTS)', '-o', '$@'],
                mkcshlib=['$(CC)', '$(DSO_LDOPTS)', '-o', '$@'],
        elif compiler.type == 'clang':
            return namespace(
                mkshlib=['$(CXX)', '$(DSO_LDOPTS)', '-Wl,-pdb,$(LINK_PDBFILE)', '-o', '$@'],
                mkcshlib=['$(CC)', '$(DSO_LDOPTS)', '-Wl,-pdb,$(LINK_PDBFILE)', '-o', '$@'],
            # presumably the MSVC branch: builds a LINK command line — the
            # remainder of this list is missing from this hunk.
            linker = [
                '-NOLOGO', '-DLL',
            return namespace(

    cc = ['$(CC)', '$(COMPUTED_C_LDFLAGS)']
    cxx = ['$(CXX)', '$(COMPUTED_CXX_LDFLAGS)']
    flags = ['$(PGO_CFLAGS)', '$(DSO_PIC_CFLAGS)', '$(DSO_LDOPTS)']
    output = ['-o', '$@']

    if target.kernel == 'Darwin':
        soname = []
    elif target.os == 'NetBSD':
        soname = ['-Wl,-soname,$(DSO_SONAME)']
        # NOTE(review): an else: line is missing above; the assert and
        # assignment below belong to that branch.
        assert compiler.type in ('gcc', 'clang')

        soname = ['-Wl,-h,$(DSO_SONAME)']

    return namespace(
        mkshlib=cxx + flags + soname + output,
        mkcshlib=cc + flags + soname + output,

set_config('MKSHLIB', make_shared_library.mkshlib)
set_config('MKCSHLIB', make_shared_library.mkcshlib)

@depends(c_compiler, toolchain_prefix, when=target_is_windows)
def rc_names(c_compiler, toolchain_prefix):
    # MSVC-style compilers use rc; GNU-style toolchains use windres,
    # trying the bare name before any toolchain-prefixed variants.
    if c_compiler.type not in ('gcc', 'clang'):
        return ('rc',)
    prefixes = ('',) + (toolchain_prefix or ())
    return tuple(prefix + 'windres' for prefix in prefixes)

check_prog('RC', rc_names, paths=sdk_bin_path)

@depends(link, toolchain_prefix)
def ar_config(link, toolchain_prefix):
    """Return namespace(names=..., flags=...) describing the archiver."""
    if link:  # if LINKER is set, it's either for lld-link or link
        # NOTE(review): the `names=` arguments, the else: line, and the
        # namespace() closings below were lost in this hunk; reconstructed
        # with the linker binary as the archiver driver — confirm upstream.
        if 'lld-link' in link:
            return namespace(
                names=(link,),
                flags=('-llvmlibthin', '-out:$@'),
            )
        else:
            return namespace(
                names=(link,),
                flags=('-NOLOGO', '-OUT:$@'),
            )
    return namespace(
        names=tuple('%s%s' % (p, 'ar')
                    for p in (toolchain_prefix or ()) + ('',)),
        flags=('crs', '$@'),
    )

ar = check_prog('AR', ar_config.names, paths=toolchain_search_path)

add_old_configure_assignment('AR', ar)

set_config('AR_FLAGS', ar_config.flags)