ldc/driver/targetmachine.cpp
Last commit: e9e4e10085 by David Nadlinger (2013-10-06 02:38:50 +02:00)
Auto-detect target attributes for -mcpu=native. This is currently only implemented for ARM in LLVM.
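
The auto-detection mentioned in the commit message is built on two LLVM host queries used further down in this file, llvm::sys::getHostCPUName() and llvm::sys::getHostCPUFeatures(). Below is a minimal standalone sketch of those calls against the LLVM 3.x API this file targets; it is illustrative only and not part of the file:

// Illustrative sketch: print the host CPU name and the feature string that
// -mcpu=native detection would produce.
#include <cstdio>
#include <string>
#include "llvm/ADT/StringMap.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Host.h"

int main()
{
    std::string cpu = llvm::sys::getHostCPUName();
    std::printf("host CPU: %s\n", cpu.c_str());

    llvm::StringMap<bool> hostFeatures;
    llvm::SubtargetFeatures features;
    // getHostCPUFeatures() returns false where host feature detection is not
    // implemented (at the time of this commit, only ARM is supported).
    if (llvm::sys::getHostCPUFeatures(hostFeatures))
    {
        for (llvm::StringMapConstIterator<bool> i = hostFeatures.begin(),
                 e = hostFeatures.end(); i != e; ++i)
            features.AddFeature(i->first(), i->second);
    }
    std::printf("host features: %s\n", features.getString().c_str());
    return 0;
}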

//===-- targetmachine.cpp -------------------------------------------------===//
//
//                         LDC - the LLVM D compiler
//
// This file is distributed under the BSD-style LDC license. See the LICENSE
// file for details.
//
//===----------------------------------------------------------------------===//
//
// Note: The target CPU detection logic has been adapted from Clang
// (lib/Driver/Tools.cpp).
//
//===----------------------------------------------------------------------===//
#include "driver/targetmachine.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "mars.h"
#include "gen/logger.h"

static std::string getX86TargetCPU(const std::string &cpu,
    const llvm::Triple &triple)
{
    if (!cpu.empty()) {
        if (cpu != "native")
            return cpu;

        // FIXME: Reject attempts to use -mcpu=native unless the target matches
        // the host.
        std::string hostCPU = llvm::sys::getHostCPUName();
        if (!hostCPU.empty() && hostCPU != "generic")
            return hostCPU;
    }

    // Select the default CPU if none was given (or detection failed).

    // Intel Macs are relatively recent, take advantage of that.
    if (triple.isOSDarwin())
        return triple.isArch64Bit() ? "core2" : "yonah";

    // Everything else goes to x86-64 in 64-bit mode.
    if (triple.isArch64Bit())
        return "x86-64";

    if (triple.getOSName().startswith("haiku"))
        return "i586";
    if (triple.getOSName().startswith("openbsd"))
        return "i486";
    if (triple.getOSName().startswith("bitrig"))
        return "i686";
    if (triple.getOSName().startswith("freebsd"))
        return "i486";
    if (triple.getOSName().startswith("netbsd"))
        return "i486";

#if LDC_LLVM_VER >= 302
    // All x86 devices running Android have core2 as their common
    // denominator. This makes a better choice than pentium4.
    if (triple.getEnvironment() == llvm::Triple::Android)
        return "core2";
#endif

    // Fallback to p4.
    return "pentium4";
}
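
// For example, getX86TargetCPU("", llvm::Triple("x86_64-apple-darwin")) yields
// "core2", while getX86TargetCPU("", llvm::Triple("i686-pc-linux-gnu")) falls
// through to the "pentium4" default.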

llvm::TargetMachine* createTargetMachine(
    std::string targetTriple,
    std::string arch,
    std::string cpu,
    std::vector<std::string> attrs,
    ExplicitBitness::Type bitness,
    llvm::Reloc::Model relocModel,
    llvm::CodeModel::Model codeModel,
    llvm::CodeGenOpt::Level codeGenOptLevel,
    bool genDebugInfo)
{
    // Determine target triple. If the user didn't explicitly specify one, use
    // the one set at LLVM configure time.
    llvm::Triple triple;
    if (targetTriple.empty())
    {
        triple = llvm::Triple(llvm::sys::getDefaultTargetTriple());

        // Handle -m32/-m64.
        if (sizeof(void*) == 4 && bitness == ExplicitBitness::M64)
        {
            triple = triple.get64BitArchVariant();
        }
        else if (sizeof(void*) == 8 && bitness == ExplicitBitness::M32)
        {
            triple = triple.get32BitArchVariant();
        }
    }
    else
    {
        triple = llvm::Triple(llvm::Triple::normalize(targetTriple));
    }

    // Look up the LLVM backend to use.
    std::string errMsg;
    const llvm::Target *target =
        llvm::TargetRegistry::lookupTarget(arch, triple, errMsg);
    if (target == 0)
    {
        error("%s", errMsg.c_str());
        fatal();
    }

    // Package up features to be passed to target/subtarget.
    llvm::SubtargetFeatures features;
    features.getDefaultSubtargetFeatures(triple);
    if (cpu == "native")
    {
        llvm::StringMap<bool> hostFeatures;
        if (llvm::sys::getHostCPUFeatures(hostFeatures))
        {
            llvm::StringMapConstIterator<bool> i = hostFeatures.begin(),
                end = hostFeatures.end();
            for (; i != end; ++i)
                features.AddFeature(i->first(), i->second);
        }
    }
    for (unsigned i = 0; i < attrs.size(); ++i)
        features.AddFeature(attrs[i]);
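
    // The resulting feature string is a comma-separated list of +/- flags,
    // e.g. "+sse2,+cmov,-avx".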

    // With an empty CPU string, LLVM will default to the host CPU, which is
    // usually not what we want (expected behavior from other compilers is
    // to default to "generic").
    if (triple.getArch() == llvm::Triple::x86_64 ||
        triple.getArch() == llvm::Triple::x86)
    {
        cpu = getX86TargetCPU(cpu, triple);
    }

    if (Logger::enabled())
    {
        Logger::cout() << "Targeting CPU '" << cpu << "' with features '" <<
            features.getString() << "'.\n";
    }

    if (triple.isMacOSX() && relocModel == llvm::Reloc::Default)
    {
        // OS X defaults to PIC (and as of 10.7.5/LLVM 3.1-3.3, TLS use leads
        // to crashes for non-PIC code). LLVM doesn't handle this.
        relocModel = llvm::Reloc::PIC_;
    }

    llvm::TargetOptions targetOptions;
    targetOptions.NoFramePointerElim = genDebugInfo;

    return target->createTargetMachine(
        triple.str(),
        cpu,
        features.getString(),
        targetOptions,
        relocModel,
        codeModel,
        codeGenOptLevel
    );
}
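
For context, a caller in the LDC driver might use this function roughly as sketched below. This is an illustrative sketch, not code from the LDC sources: the helper name createHostTargetMachine and the specific argument values are invented, and target registration is done here via the initialization calls from llvm/Support/TargetSelect.h.

// Illustrative usage sketch (not part of targetmachine.cpp): build a
// TargetMachine for the build host with -mcpu=native semantics.
#include <string>
#include <vector>
#include "driver/targetmachine.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"

llvm::TargetMachine *createHostTargetMachine()
{
    // The LLVM backends must be registered before lookupTarget() can succeed.
    llvm::InitializeAllTargets();
    llvm::InitializeAllTargetMCs();

    std::vector<std::string> attrs;      // no extra feature attributes
    return createTargetMachine(
        "",                              // empty triple: use the configure-time default
        "",                              // empty arch: pick the backend from the triple
        "native",                        // detect the host CPU (and, where supported, its features)
        attrs,
        ExplicitBitness::M64,            // M32/M64 are the enumerators used above
        llvm::Reloc::Default,
        llvm::CodeModel::Default,
        llvm::CodeGenOpt::Default,
        false);                          // genDebugInfo: leave frame pointer elimination enabled
}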