Added Windows support.
This commit is contained in:
parent
d03afbbd84
commit
e8f2f6e8c9
59
.hgignore
59
.hgignore
@ -1,25 +1,34 @@
|
||||
syntax: glob
|
||||
build
|
||||
extension/version_auto.h
|
||||
google-breakpad/android
|
||||
google-breakpad/codereview.settings
|
||||
google-breakpad/README.ANDROID
|
||||
google-breakpad/src/client/apple
|
||||
google-breakpad/src/client/ios
|
||||
google-breakpad/src/client/mac
|
||||
google-breakpad/src/client/solaris
|
||||
google-breakpad/src/client/windows
|
||||
google-breakpad/src/common/android
|
||||
google-breakpad/src/common/mac
|
||||
google-breakpad/src/common/solaris
|
||||
google-breakpad/src/common/testdata
|
||||
google-breakpad/src/common/tests
|
||||
google-breakpad/src/common/windows
|
||||
google-breakpad/src/processor
|
||||
google-breakpad/src/testing
|
||||
google-breakpad/src/third_party/curl
|
||||
google-breakpad/src/third_party/glog
|
||||
google-breakpad/src/third_party/libdisasm
|
||||
google-breakpad/src/third_party/linux
|
||||
google-breakpad/src/third_party/protobuf
|
||||
google-breakpad/src/tools
|
||||
syntax: regexp
|
||||
^build
|
||||
^extension/version_auto\.h
|
||||
^google-breakpad/android
|
||||
^google-breakpad/README\.ANDROID
|
||||
^google-breakpad/src/client/apple
|
||||
^google-breakpad/src/client/ios
|
||||
^google-breakpad/src/client/mac
|
||||
^google-breakpad/src/client/solaris
|
||||
^google-breakpad/src/client/windows/sender
|
||||
^google-breakpad/src/client/windows/tests
|
||||
^google-breakpad/src/client/windows/unittests
|
||||
^google-breakpad/src/common/android
|
||||
^google-breakpad/src/common/mac
|
||||
^google-breakpad/src/common/solaris
|
||||
^google-breakpad/src/common/testdata
|
||||
^google-breakpad/src/common/tests
|
||||
^google-breakpad/src/common/windows
|
||||
^google-breakpad/src/processor
|
||||
^google-breakpad/src/testing
|
||||
^google-breakpad/src/third_party/curl
|
||||
^google-breakpad/src/third_party/glog
|
||||
^google-breakpad/src/third_party/libdisasm
|
||||
^google-breakpad/src/third_party/linux
|
||||
^google-breakpad/src/third_party/protobuf
|
||||
^google-breakpad/src/tools/gyp/.*\.pyc$
|
||||
^google-breakpad/src/tools/gyp/samples
|
||||
^google-breakpad/src/tools/gyp/test
|
||||
^google-breakpad/src/tools/gyp/tools
|
||||
^google-breakpad/src/tools/linux
|
||||
^google-breakpad/src/tools/mac
|
||||
^google-breakpad/src/tools/python
|
||||
^google-breakpad/src/tools/solaris
|
||||
^google-breakpad/src/tools/windows
|
@ -3,9 +3,10 @@ import os
|
||||
import ambuild.osutil as osutil
|
||||
from ambuild.command import SymlinkCommand
|
||||
from ambuild.command import ShellCommand
|
||||
from ambuild.command import DirectCommand
|
||||
|
||||
def BuildEverything():
|
||||
if AMBuild.target['platform'] not in ['linux']:
|
||||
if AMBuild.target['platform'] not in ['linux', 'windows']:
|
||||
return
|
||||
|
||||
BuildBreakpad()
|
||||
@ -14,11 +15,23 @@ def BuildEverything():
|
||||
def BuildBreakpad():
|
||||
breakpad = AMBuild.AddJob('google-breakpad')
|
||||
|
||||
if osutil.FileExists(os.path.join(AMBuild.outputFolder, 'google-breakpad', 'src', 'client', 'linux', 'libbreakpad_client.a')):
|
||||
return
|
||||
if AMBuild.target['platform'] in ['linux']:
|
||||
if osutil.FileExists(os.path.join(AMBuild.outputFolder, 'google-breakpad', 'src', 'client', 'linux', 'libbreakpad_client.a')):
|
||||
return
|
||||
|
||||
breakpad.AddCommand(ShellCommand('CXXFLAGS=-m32 CFLAGS=-m32 CPPFLAGS=-m32 ' + os.path.join(AMBuild.sourceFolder, 'google-breakpad', 'configure')))
|
||||
breakpad.AddCommand(ShellCommand('make src/client/linux/libbreakpad_client.a'))
|
||||
breakpad.AddCommand(ShellCommand('CXXFLAGS=-m32 CFLAGS=-m32 CPPFLAGS=-m32 ' + os.path.join(AMBuild.sourceFolder, 'google-breakpad', 'configure')))
|
||||
breakpad.AddCommand(ShellCommand('make src/client/linux/libbreakpad_client.a'))
|
||||
|
||||
elif AMBuild.target['platform'] in ['windows']:
|
||||
if osutil.FileExists(os.path.join(AMBuild.sourceFolder, 'google-breakpad', 'src', 'client', 'windows', 'handler', 'Release', 'lib', 'exception_handler.lib')):
|
||||
return
|
||||
|
||||
gyp = os.path.join(AMBuild.sourceFolder, 'google-breakpad', 'src', 'tools', 'gyp', 'gyp.bat')
|
||||
gyppath = os.path.join(AMBuild.sourceFolder, 'google-breakpad', 'src', 'client', 'windows', 'handler', 'exception_handler.gyp')
|
||||
breakpad.AddCommand(DirectCommand([gyp, gyppath]))
|
||||
|
||||
slnpath = os.path.join(AMBuild.sourceFolder, 'google-breakpad', 'src', 'client', 'windows', 'handler', 'exception_handler.sln')
|
||||
breakpad.AddCommand(DirectCommand(['msbuild', slnpath, '/p:Configuration=Release']))
|
||||
|
||||
def BuildExtension():
|
||||
compiler = SM.DefaultCompiler()
|
||||
@ -32,8 +45,9 @@ def BuildExtension():
|
||||
|
||||
compiler['CXXINCLUDES'].append(os.path.join(AMBuild.sourceFolder, 'google-breakpad', 'src'))
|
||||
|
||||
compiler['POSTLINKFLAGS'].append('-lstdc++')
|
||||
compiler['POSTLINKFLAGS'].append('-pthread')
|
||||
if AMBuild.target['platform'] in ['linux']:
|
||||
compiler['POSTLINKFLAGS'].append('-lstdc++')
|
||||
compiler['POSTLINKFLAGS'].append('-pthread')
|
||||
|
||||
name = 'accelerator.ext'
|
||||
extension = AMBuild.AddJob(name)
|
||||
@ -45,13 +59,22 @@ def BuildExtension():
|
||||
'sdk/smsdk_ext.cpp'
|
||||
])
|
||||
|
||||
link = os.path.join(AMBuild.outputFolder, extension.workFolder, 'libbreakpad_client.a')
|
||||
target = os.path.join(AMBuild.outputFolder, 'google-breakpad', 'src', 'client', 'linux', 'libbreakpad_client.a')
|
||||
try:
|
||||
os.lstat(link)
|
||||
except:
|
||||
extension.AddCommand(SymlinkCommand(link, target))
|
||||
binary.AddObjectFiles(['libbreakpad_client.a'])
|
||||
if AMBuild.target['platform'] in ['linux']:
|
||||
link = os.path.join(AMBuild.outputFolder, extension.workFolder, 'libbreakpad_client.a')
|
||||
target = os.path.join(AMBuild.outputFolder, 'google-breakpad', 'src', 'client', 'linux', 'libbreakpad_client.a')
|
||||
try:
|
||||
os.lstat(link)
|
||||
except:
|
||||
extension.AddCommand(SymlinkCommand(link, target))
|
||||
binary.AddObjectFiles(['libbreakpad_client.a'])
|
||||
|
||||
elif AMBuild.target['platform'] in ['windows']:
|
||||
libs = ['exception_handler', 'common', 'crash_generation_client']
|
||||
for lib in libs:
|
||||
path = os.path.join(AMBuild.sourceFolder, 'google-breakpad', 'src', 'client', 'windows', 'handler', 'Release', 'lib', lib + '.lib')
|
||||
if os.path.isfile(path):
|
||||
binary.RelinkIfNewer(path)
|
||||
binary['POSTLINKFLAGS'].extend([path])
|
||||
|
||||
SM.AutoVersion('extension', binary)
|
||||
SM.ExtractDebugInfo(extension, binary)
|
||||
|
@ -22,11 +22,16 @@
|
||||
#include <IWebternet.h>
|
||||
#include "MemoryDownloader.h"
|
||||
|
||||
#if defined _LINUX
|
||||
#include "client/linux/handler/exception_handler.h"
|
||||
|
||||
#include <signal.h>
|
||||
#include <dirent.h>
|
||||
#include <unistd.h>
|
||||
#elif defined _WINDOWS
|
||||
#define _STDINT // ~.~
|
||||
#include "client/windows/handler/exception_handler.h"
|
||||
#endif
|
||||
|
||||
Accelerator g_accelerator;
|
||||
SMEXT_LINK(&g_accelerator);
|
||||
@ -37,6 +42,7 @@ static IThreadHandle *uploadThread;
|
||||
char buffer[255];
|
||||
google_breakpad::ExceptionHandler *handler = NULL;
|
||||
|
||||
#if defined _LINUX
|
||||
void (*SignalHandler)(int, siginfo_t *, void *);
|
||||
|
||||
const int kExceptionSignals[] = {
|
||||
@ -84,6 +90,39 @@ void OnGameFrame(bool simulating)
|
||||
sigaction(kExceptionSignals[i], &act, NULL);
|
||||
}
|
||||
}
|
||||
#elif defined _WINDOWS
|
||||
LONG CALLBACK BreakpadVectoredHandler(_In_ PEXCEPTION_POINTERS ExceptionInfo)
|
||||
{
|
||||
if (ExceptionInfo->ExceptionRecord->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
|
||||
{
|
||||
return EXCEPTION_CONTINUE_SEARCH;
|
||||
}
|
||||
|
||||
if (handler->WriteMinidumpForException(ExceptionInfo))
|
||||
{
|
||||
// Stop the handler thread from deadlocking us.
|
||||
delete handler;
|
||||
|
||||
// Stop Valve's handler being called.
|
||||
ExceptionInfo->ExceptionRecord->ExceptionCode = EXCEPTION_BREAKPOINT;
|
||||
|
||||
return EXCEPTION_EXECUTE_HANDLER;
|
||||
} else {
|
||||
return EXCEPTION_CONTINUE_SEARCH;
|
||||
}
|
||||
}
|
||||
|
||||
static bool dumpCallback(const wchar_t* dump_path,
|
||||
const wchar_t* minidump_id,
|
||||
void* context,
|
||||
EXCEPTION_POINTERS* exinfo,
|
||||
MDRawAssertionInfo* assertion,
|
||||
bool succeeded)
|
||||
{
|
||||
printf("Wrote minidump to: %ls\\%ls.dmp\n", dump_path, minidump_id);
|
||||
return succeeded;
|
||||
}
|
||||
#endif
|
||||
|
||||
void UploadCrashDump(const char *path)
|
||||
{
|
||||
@ -97,6 +136,8 @@ void UploadCrashDump(const char *path)
|
||||
IWebTransfer *xfer = webternet->CreateSession();
|
||||
xfer->SetFailOnHTTPError(true);
|
||||
|
||||
printf(">>> UPLOADING %s\n", path);
|
||||
|
||||
if (!xfer->PostAndDownload("http://crash.limetech.org/submit", form, &data, NULL))
|
||||
{
|
||||
printf(">>> UPLOAD FAILED\n");
|
||||
@ -108,25 +149,31 @@ void UploadCrashDump(const char *path)
|
||||
|
||||
void Accelerator::OnCoreMapStart(edict_t *pEdictList, int edictCount, int clientMax)
|
||||
{
|
||||
DIR *dumps = opendir(buffer);
|
||||
dirent *dump;
|
||||
IDirectory *dumps = libsys->OpenDirectory(buffer);
|
||||
|
||||
char path[512];
|
||||
|
||||
while ((dump = readdir(dumps)) != NULL)
|
||||
while (dumps->MoreFiles())
|
||||
{
|
||||
if (dump->d_type == DT_DIR)
|
||||
if (!dumps->IsEntryFile())
|
||||
{
|
||||
dumps->NextEntry();
|
||||
continue;
|
||||
|
||||
printf(">>> UPLOADING %s\n", dump->d_name);
|
||||
}
|
||||
|
||||
g_pSM->Format(path, sizeof(path), "%s/%s", buffer, dump->d_name);
|
||||
|
||||
g_pSM->Format(path, sizeof(path), "%s/%s", buffer, dumps->GetEntryName());
|
||||
UploadCrashDump(path);
|
||||
|
||||
#if defined _LINUX
|
||||
unlink(path);
|
||||
#elif defined _WINDOWS
|
||||
_unlink(path);
|
||||
#endif
|
||||
|
||||
dumps->NextEntry();
|
||||
}
|
||||
|
||||
closedir(dumps);
|
||||
libsys->CloseDirectory(dumps);
|
||||
}
|
||||
|
||||
bool Accelerator::SDK_OnLoad(char *error, size_t maxlength, bool late)
|
||||
@ -134,7 +181,7 @@ bool Accelerator::SDK_OnLoad(char *error, size_t maxlength, bool late)
|
||||
sharesys->AddDependency(myself, "webternet.ext", true, true);
|
||||
SM_GET_IFACE(WEBTERNET, webternet);
|
||||
|
||||
g_pSM->BuildPath(Path_SM, buffer, 255, "data/dumps");
|
||||
g_pSM->BuildPath(Path_SM, buffer, sizeof(buffer), "data/dumps");
|
||||
|
||||
if (!libsys->IsPathDirectory(buffer))
|
||||
{
|
||||
@ -146,20 +193,39 @@ bool Accelerator::SDK_OnLoad(char *error, size_t maxlength, bool late)
|
||||
}
|
||||
}
|
||||
|
||||
#if defined _LINUX
|
||||
google_breakpad::MinidumpDescriptor descriptor(buffer);
|
||||
handler = new google_breakpad::ExceptionHandler(descriptor, NULL, dumpCallback, NULL, true, -1);
|
||||
|
||||
struct sigaction oact;
|
||||
sigaction(SIGSEGV, NULL, &oact);
|
||||
SignalHandler = oact.sa_sigaction;
|
||||
|
||||
|
||||
g_pSM->AddGameFrameHook(OnGameFrame);
|
||||
#elif defined _WINDOWS
|
||||
wchar_t *buf = new wchar_t[sizeof(buffer)];
|
||||
size_t num_chars = mbstowcs(buf, buffer, sizeof(buffer));
|
||||
|
||||
handler = new google_breakpad::ExceptionHandler(std::wstring(buf, num_chars), NULL, dumpCallback, NULL, google_breakpad::ExceptionHandler::HANDLER_ALL);
|
||||
|
||||
AddVectoredExceptionHandler(0, BreakpadVectoredHandler);
|
||||
|
||||
delete buf;
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void Accelerator::SDK_OnUnload()
|
||||
{
|
||||
#if defined _LINUX
|
||||
g_pSM->RemoveGameFrameHook(OnGameFrame);
|
||||
#elif defined _WINDOWS
|
||||
RemoveVectoredExceptionHandler(BreakpadVectoredHandler);
|
||||
#endif
|
||||
|
||||
delete handler;
|
||||
}
|
||||
|
||||
|
55
google-breakpad/src/client/windows/breakpad_client.gyp
Normal file
55
google-breakpad/src/client/windows/breakpad_client.gyp
Normal file
@ -0,0 +1,55 @@
|
||||
# Copyright (c) 2010, Google Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
{
|
||||
'includes': [
|
||||
'build/common.gypi',
|
||||
],
|
||||
'targets': [
|
||||
{
|
||||
'target_name': 'common',
|
||||
'type': 'static_library',
|
||||
'include_dirs': [
|
||||
'<(DEPTH)',
|
||||
],
|
||||
'direct_dependent_settings': {
|
||||
'include_dirs': [
|
||||
'<(DEPTH)',
|
||||
]
|
||||
},
|
||||
'sources': [
|
||||
'<(DEPTH)/common/windows/guid_string.cc',
|
||||
'<(DEPTH)/common/windows/guid_string.h',
|
||||
'<(DEPTH)/common/windows/http_upload.cc',
|
||||
'<(DEPTH)/common/windows/http_upload.h',
|
||||
'<(DEPTH)/common/windows/string_utils.cc',
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
1330
google-breakpad/src/client/windows/build/common.gypi
Normal file
1330
google-breakpad/src/client/windows/build/common.gypi
Normal file
File diff suppressed because it is too large
Load Diff
71
google-breakpad/src/client/windows/build/external_code.gypi
Normal file
71
google-breakpad/src/client/windows/build/external_code.gypi
Normal file
@ -0,0 +1,71 @@
|
||||
# Copyright (c) 2010, Google Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
{
|
||||
'conditions': [
|
||||
[ 'OS=="linux"', {
|
||||
'target_defaults': {
|
||||
'cflags!': [
|
||||
'-Wall',
|
||||
'-Werror',
|
||||
],
|
||||
},
|
||||
}],
|
||||
[ 'OS=="win"', {
|
||||
'target_defaults': {
|
||||
'defines': [
|
||||
'_CRT_SECURE_NO_DEPRECATE',
|
||||
'_CRT_NONSTDC_NO_WARNINGS',
|
||||
'_CRT_NONSTDC_NO_DEPRECATE',
|
||||
],
|
||||
'msvs_disabled_warnings': [4800],
|
||||
'msvs_settings': {
|
||||
'VCCLCompilerTool': {
|
||||
'WarnAsError': 'false',
|
||||
'Detect64BitPortabilityProblems': 'false',
|
||||
},
|
||||
},
|
||||
},
|
||||
}],
|
||||
[ 'OS=="mac"', {
|
||||
'target_defaults': {
|
||||
'xcode_settings': {
|
||||
'GCC_TREAT_WARNINGS_AS_ERRORS': 'NO',
|
||||
'WARNING_CFLAGS!': ['-Wall'],
|
||||
},
|
||||
},
|
||||
}],
|
||||
],
|
||||
}
|
||||
|
||||
# Local Variables:
|
||||
# tab-width:2
|
||||
# indent-tabs-mode:nil
|
||||
# End:
|
||||
# vim: set expandtab tabstop=2 shiftwidth=2:
|
@ -0,0 +1,15 @@
|
||||
{
|
||||
'msvs_settings': {
|
||||
'VCCLCompilerTool': {
|
||||
'Optimization': '2',
|
||||
'StringPooling': 'true',
|
||||
'OmitFramePointers': 'true',
|
||||
},
|
||||
'VCLinkerTool': {
|
||||
'LinkIncremental': '1',
|
||||
'OptimizeReferences': '2',
|
||||
'EnableCOMDATFolding': '2',
|
||||
'OptimizeForWindows98': '1',
|
||||
},
|
||||
},
|
||||
}
|
@ -0,0 +1,3 @@
|
||||
{
|
||||
'includes': ['release_defaults.gypi'],
|
||||
}
|
@ -0,0 +1,21 @@
|
||||
{
|
||||
'includes': ['release_defaults.gypi'],
|
||||
'defines': ['OFFICIAL_BUILD'],
|
||||
'msvs_settings': {
|
||||
'VCCLCompilerTool': {
|
||||
'Optimization': '3',
|
||||
'InlineFunctionExpansion': '2',
|
||||
'EnableIntrinsicFunctions': 'true',
|
||||
'FavorSizeOrSpeed': '2',
|
||||
'OmitFramePointers': 'true',
|
||||
'EnableFiberSafeOptimizations': 'true',
|
||||
'WholeProgramOptimization': 'true',
|
||||
},
|
||||
'VCLibrarianTool': {
|
||||
'AdditionalOptions': ['/ltcg', '/expectedoutputsize:120000000'],
|
||||
},
|
||||
'VCLinkerTool': {
|
||||
'LinkTimeCodeGeneration': '1',
|
||||
},
|
||||
},
|
||||
}
|
19
google-breakpad/src/client/windows/build/release.gypi
Normal file
19
google-breakpad/src/client/windows/build/release.gypi
Normal file
@ -0,0 +1,19 @@
|
||||
{
|
||||
'conditions': [
|
||||
# Handle build types.
|
||||
['buildtype=="Dev"', {
|
||||
'includes': ['internal/release_impl.gypi'],
|
||||
}],
|
||||
['buildtype=="Official"', {
|
||||
'includes': ['internal/release_impl_official.gypi'],
|
||||
}],
|
||||
# TODO(bradnelson): may also need:
|
||||
# checksenabled
|
||||
# coverage
|
||||
# dom_stats
|
||||
# pgo_instrument
|
||||
# pgo_optimize
|
||||
# purify
|
||||
],
|
||||
}
|
||||
|
@ -0,0 +1,81 @@
|
||||
// Copyright (c) 2008, Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef CLIENT_WINDOWS_COMMON_AUTO_CRITICAL_SECTION_H__
|
||||
#define CLIENT_WINDOWS_COMMON_AUTO_CRITICAL_SECTION_H__
|
||||
|
||||
#include <Windows.h>
|
||||
|
||||
namespace google_breakpad {
|
||||
|
||||
// Automatically enters the critical section in the constructor and leaves
|
||||
// the critical section in the destructor.
|
||||
class AutoCriticalSection {
|
||||
public:
|
||||
// Creates a new instance with the given critical section object
|
||||
// and enters the critical section immediately.
|
||||
explicit AutoCriticalSection(CRITICAL_SECTION* cs) : cs_(cs), taken_(false) {
|
||||
assert(cs_);
|
||||
Acquire();
|
||||
}
|
||||
|
||||
// Destructor: leaves the critical section.
|
||||
~AutoCriticalSection() {
|
||||
if (taken_) {
|
||||
Release();
|
||||
}
|
||||
}
|
||||
|
||||
// Enters the critical section. Recursive Acquire() calls are not allowed.
|
||||
void Acquire() {
|
||||
assert(!taken_);
|
||||
EnterCriticalSection(cs_);
|
||||
taken_ = true;
|
||||
}
|
||||
|
||||
// Leaves the critical section. The caller should not call Release() unless
|
||||
// the critical seciton has been entered already.
|
||||
void Release() {
|
||||
assert(taken_);
|
||||
taken_ = false;
|
||||
LeaveCriticalSection(cs_);
|
||||
}
|
||||
|
||||
private:
|
||||
// Disable copy ctor and operator=.
|
||||
AutoCriticalSection(const AutoCriticalSection&);
|
||||
AutoCriticalSection& operator=(const AutoCriticalSection&);
|
||||
|
||||
CRITICAL_SECTION* cs_;
|
||||
bool taken_;
|
||||
};
|
||||
|
||||
} // namespace google_breakpad
|
||||
|
||||
#endif // CLIENT_WINDOWS_COMMON_AUTO_CRITICAL_SECTION_H__
|
181
google-breakpad/src/client/windows/common/ipc_protocol.h
Normal file
181
google-breakpad/src/client/windows/common/ipc_protocol.h
Normal file
@ -0,0 +1,181 @@
|
||||
// Copyright (c) 2008, Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef CLIENT_WINDOWS_COMMON_IPC_PROTOCOL_H__
|
||||
#define CLIENT_WINDOWS_COMMON_IPC_PROTOCOL_H__
|
||||
|
||||
#include <Windows.h>
|
||||
#include <DbgHelp.h>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include "common/windows/string_utils-inl.h"
|
||||
#include "google_breakpad/common/minidump_format.h"
|
||||
|
||||
namespace google_breakpad {
|
||||
|
||||
// Name/value pair for custom client information.
|
||||
struct CustomInfoEntry {
|
||||
// Maximum length for name and value for client custom info.
|
||||
static const int kNameMaxLength = 64;
|
||||
static const int kValueMaxLength = 64;
|
||||
|
||||
CustomInfoEntry() {
|
||||
// Putting name and value in initializer list makes VC++ show warning 4351.
|
||||
set_name(NULL);
|
||||
set_value(NULL);
|
||||
}
|
||||
|
||||
CustomInfoEntry(const wchar_t* name_arg, const wchar_t* value_arg) {
|
||||
set_name(name_arg);
|
||||
set_value(value_arg);
|
||||
}
|
||||
|
||||
void set_name(const wchar_t* name_arg) {
|
||||
if (!name_arg) {
|
||||
name[0] = L'\0';
|
||||
return;
|
||||
}
|
||||
WindowsStringUtils::safe_wcscpy(name, kNameMaxLength, name_arg);
|
||||
}
|
||||
|
||||
void set_value(const wchar_t* value_arg) {
|
||||
if (!value_arg) {
|
||||
value[0] = L'\0';
|
||||
return;
|
||||
}
|
||||
|
||||
WindowsStringUtils::safe_wcscpy(value, kValueMaxLength, value_arg);
|
||||
}
|
||||
|
||||
void set(const wchar_t* name_arg, const wchar_t* value_arg) {
|
||||
set_name(name_arg);
|
||||
set_value(value_arg);
|
||||
}
|
||||
|
||||
wchar_t name[kNameMaxLength];
|
||||
wchar_t value[kValueMaxLength];
|
||||
};
|
||||
|
||||
// Constants for the protocol between client and the server.
|
||||
|
||||
// Tags sent with each message indicating the purpose of
|
||||
// the message.
|
||||
enum MessageTag {
|
||||
MESSAGE_TAG_NONE = 0,
|
||||
MESSAGE_TAG_REGISTRATION_REQUEST = 1,
|
||||
MESSAGE_TAG_REGISTRATION_RESPONSE = 2,
|
||||
MESSAGE_TAG_REGISTRATION_ACK = 3,
|
||||
MESSAGE_TAG_UPLOAD_REQUEST = 4
|
||||
};
|
||||
|
||||
struct CustomClientInfo {
|
||||
const CustomInfoEntry* entries;
|
||||
size_t count;
|
||||
};
|
||||
|
||||
// Message structure for IPC between crash client and crash server.
|
||||
struct ProtocolMessage {
|
||||
ProtocolMessage()
|
||||
: tag(MESSAGE_TAG_NONE),
|
||||
id(0),
|
||||
dump_type(MiniDumpNormal),
|
||||
thread_id(0),
|
||||
exception_pointers(NULL),
|
||||
assert_info(NULL),
|
||||
custom_client_info(),
|
||||
dump_request_handle(NULL),
|
||||
dump_generated_handle(NULL),
|
||||
server_alive_handle(NULL) {
|
||||
}
|
||||
|
||||
// Creates an instance with the given parameters.
|
||||
ProtocolMessage(MessageTag arg_tag,
|
||||
DWORD arg_id,
|
||||
MINIDUMP_TYPE arg_dump_type,
|
||||
DWORD* arg_thread_id,
|
||||
EXCEPTION_POINTERS** arg_exception_pointers,
|
||||
MDRawAssertionInfo* arg_assert_info,
|
||||
const CustomClientInfo& custom_info,
|
||||
HANDLE arg_dump_request_handle,
|
||||
HANDLE arg_dump_generated_handle,
|
||||
HANDLE arg_server_alive)
|
||||
: tag(arg_tag),
|
||||
id(arg_id),
|
||||
dump_type(arg_dump_type),
|
||||
thread_id(arg_thread_id),
|
||||
exception_pointers(arg_exception_pointers),
|
||||
assert_info(arg_assert_info),
|
||||
custom_client_info(custom_info),
|
||||
dump_request_handle(arg_dump_request_handle),
|
||||
dump_generated_handle(arg_dump_generated_handle),
|
||||
server_alive_handle(arg_server_alive) {
|
||||
}
|
||||
|
||||
// Tag in the message.
|
||||
MessageTag tag;
|
||||
|
||||
// The id for this message. This may be either a process id or a crash id
|
||||
// depending on the type of message.
|
||||
DWORD id;
|
||||
|
||||
// Dump type requested.
|
||||
MINIDUMP_TYPE dump_type;
|
||||
|
||||
// Client thread id pointer.
|
||||
DWORD* thread_id;
|
||||
|
||||
// Exception information.
|
||||
EXCEPTION_POINTERS** exception_pointers;
|
||||
|
||||
// Assert information in case of an invalid parameter or
|
||||
// pure call failure.
|
||||
MDRawAssertionInfo* assert_info;
|
||||
|
||||
// Custom client information.
|
||||
CustomClientInfo custom_client_info;
|
||||
|
||||
// Handle to signal the crash event.
|
||||
HANDLE dump_request_handle;
|
||||
|
||||
// Handle to check if server is done generating crash.
|
||||
HANDLE dump_generated_handle;
|
||||
|
||||
// Handle to a mutex that becomes signaled (WAIT_ABANDONED)
|
||||
// if server process goes down.
|
||||
HANDLE server_alive_handle;
|
||||
|
||||
private:
|
||||
// Disable copy ctor and operator=.
|
||||
ProtocolMessage(const ProtocolMessage& msg);
|
||||
ProtocolMessage& operator=(const ProtocolMessage& msg);
|
||||
};
|
||||
|
||||
} // namespace google_breakpad
|
||||
|
||||
#endif // CLIENT_WINDOWS_COMMON_IPC_PROTOCOL_H__
|
@ -0,0 +1,58 @@
|
||||
=========================================================================
|
||||
State machine transitions for the Crash Generation Server
|
||||
=========================================================================
|
||||
|
||||
=========================================================================
|
||||
|
|
||||
STATE | ACTIONS
|
||||
|
|
||||
=========================================================================
|
||||
ERROR | Clean up resources used to serve clients.
|
||||
| Always remain in ERROR state.
|
||||
-------------------------------------------------------------------------
|
||||
INITIAL | Connect to the pipe asynchronously.
|
||||
| If connection is successfully queued up asynchronously,
|
||||
| go into CONNECTING state.
|
||||
| If connection is done synchronously, go into CONNECTED
|
||||
| state.
|
||||
| For any unexpected problems, go into ERROR state.
|
||||
-------------------------------------------------------------------------
|
||||
CONNECTING | Get the result of async connection request.
|
||||
| If I/O is still incomplete, remain in the CONNECTING
|
||||
| state.
|
||||
| If connection is complete, go into CONNECTED state.
|
||||
| For any unexpected problems, go into DISCONNECTING state.
|
||||
-------------------------------------------------------------------------
|
||||
CONNECTED | Read from the pipe asynchronously.
|
||||
| If read request is successfully queued up asynchronously,
|
||||
| go into READING state.
|
||||
| For any unexpected problems, go into DISCONNECTING state.
|
||||
-------------------------------------------------------------------------
|
||||
READING | Get the result of async read request.
|
||||
| If read is done, go into READ_DONE state.
|
||||
| For any unexpected problems, go into DISCONNECTING state.
|
||||
-------------------------------------------------------------------------
|
||||
READ_DONE | Register the client, prepare the reply and write the
|
||||
| reply to the pipe asynchronously.
|
||||
| If write request is successfully queued up asynchronously,
|
||||
| go into WRITING state.
|
||||
| For any unexpected problems, go into DISCONNECTING state.
|
||||
-------------------------------------------------------------------------
|
||||
WRITING | Get the result of the async write request.
|
||||
| If write is done, go into WRITE_DONE state.
|
||||
| For any unexpected problems, go into DISCONNECTING state.
|
||||
-------------------------------------------------------------------------
|
||||
WRITE_DONE | Read from the pipe asynchronously (for an ACK).
|
||||
| If read request is successfully queued up asynchonously,
|
||||
| go into READING_ACK state.
|
||||
| For any unexpected problems, go into DISCONNECTING state.
|
||||
-------------------------------------------------------------------------
|
||||
READING_ACK | Get the result of the async read request.
|
||||
| If read is done, perform action for successful client
|
||||
| connection.
|
||||
| Go into DISCONNECTING state.
|
||||
-------------------------------------------------------------------------
|
||||
DISCONNECTING | Disconnect from the pipe, reset the event and go into
|
||||
| INITIAL state and signal the event again. If anything
|
||||
| fails, go into ERROR state.
|
||||
=========================================================================
|
@ -0,0 +1,221 @@
|
||||
// Copyright (c) 2008, Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#include "client/windows/crash_generation/client_info.h"
|
||||
#include "client/windows/common/ipc_protocol.h"
|
||||
|
||||
static const wchar_t kCustomInfoProcessUptimeName[] = L"ptime";
|
||||
static const size_t kMaxCustomInfoEntries = 4096;
|
||||
|
||||
namespace google_breakpad {
|
||||
|
||||
ClientInfo::ClientInfo(CrashGenerationServer* crash_server,
|
||||
DWORD pid,
|
||||
MINIDUMP_TYPE dump_type,
|
||||
DWORD* thread_id,
|
||||
EXCEPTION_POINTERS** ex_info,
|
||||
MDRawAssertionInfo* assert_info,
|
||||
const CustomClientInfo& custom_client_info)
|
||||
: crash_server_(crash_server),
|
||||
pid_(pid),
|
||||
dump_type_(dump_type),
|
||||
ex_info_(ex_info),
|
||||
assert_info_(assert_info),
|
||||
custom_client_info_(custom_client_info),
|
||||
thread_id_(thread_id),
|
||||
process_handle_(NULL),
|
||||
dump_requested_handle_(NULL),
|
||||
dump_generated_handle_(NULL),
|
||||
dump_request_wait_handle_(NULL),
|
||||
process_exit_wait_handle_(NULL),
|
||||
crash_id_(NULL) {
|
||||
GetSystemTimeAsFileTime(&start_time_);
|
||||
}
|
||||
|
||||
bool ClientInfo::Initialize() {
|
||||
process_handle_ = OpenProcess(GENERIC_ALL, FALSE, pid_);
|
||||
if (!process_handle_) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// The crash_id will be the low order word of the process creation time.
|
||||
FILETIME creation_time, exit_time, kernel_time, user_time;
|
||||
if (GetProcessTimes(process_handle_, &creation_time, &exit_time,
|
||||
&kernel_time, &user_time))
|
||||
crash_id_ = creation_time.dwLowDateTime;
|
||||
|
||||
dump_requested_handle_ = CreateEvent(NULL, // Security attributes.
|
||||
TRUE, // Manual reset.
|
||||
FALSE, // Initial state.
|
||||
NULL); // Name.
|
||||
if (!dump_requested_handle_) {
|
||||
return false;
|
||||
}
|
||||
|
||||
dump_generated_handle_ = CreateEvent(NULL, // Security attributes.
|
||||
TRUE, // Manual reset.
|
||||
FALSE, // Initial state.
|
||||
NULL); // Name.
|
||||
return dump_generated_handle_ != NULL;
|
||||
}
|
||||
|
||||
void ClientInfo::UnregisterDumpRequestWaitAndBlockUntilNoPending() {
|
||||
if (dump_request_wait_handle_) {
|
||||
// Wait for callbacks that might already be running to finish.
|
||||
UnregisterWaitEx(dump_request_wait_handle_, INVALID_HANDLE_VALUE);
|
||||
dump_request_wait_handle_ = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void ClientInfo::UnregisterProcessExitWait(bool block_until_no_pending) {
|
||||
if (process_exit_wait_handle_) {
|
||||
if (block_until_no_pending) {
|
||||
// Wait for the callback that might already be running to finish.
|
||||
UnregisterWaitEx(process_exit_wait_handle_, INVALID_HANDLE_VALUE);
|
||||
} else {
|
||||
UnregisterWait(process_exit_wait_handle_);
|
||||
}
|
||||
process_exit_wait_handle_ = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
ClientInfo::~ClientInfo() {
|
||||
// Waiting for the callback to finish here is safe because ClientInfo's are
|
||||
// never destroyed from the dump request handling callback.
|
||||
UnregisterDumpRequestWaitAndBlockUntilNoPending();
|
||||
|
||||
// This is a little tricky because ClientInfo's may be destroyed by the same
|
||||
// callback (OnClientEnd) and waiting for it to finish will cause a deadlock.
|
||||
// Regardless of this complication, wait for any running callbacks to finish
|
||||
// so that the common case is properly handled. In order to avoid deadlocks,
|
||||
// the OnClientEnd callback must call UnregisterProcessExitWait(false)
|
||||
// before deleting the ClientInfo.
|
||||
UnregisterProcessExitWait(true);
|
||||
|
||||
if (process_handle_) {
|
||||
CloseHandle(process_handle_);
|
||||
}
|
||||
|
||||
if (dump_requested_handle_) {
|
||||
CloseHandle(dump_requested_handle_);
|
||||
}
|
||||
|
||||
if (dump_generated_handle_) {
|
||||
CloseHandle(dump_generated_handle_);
|
||||
}
|
||||
}
|
||||
|
||||
bool ClientInfo::GetClientExceptionInfo(EXCEPTION_POINTERS** ex_info) const {
|
||||
SIZE_T bytes_count = 0;
|
||||
if (!ReadProcessMemory(process_handle_,
|
||||
ex_info_,
|
||||
ex_info,
|
||||
sizeof(*ex_info),
|
||||
&bytes_count)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return bytes_count == sizeof(*ex_info);
|
||||
}
|
||||
|
||||
bool ClientInfo::GetClientThreadId(DWORD* thread_id) const {
|
||||
SIZE_T bytes_count = 0;
|
||||
if (!ReadProcessMemory(process_handle_,
|
||||
thread_id_,
|
||||
thread_id,
|
||||
sizeof(*thread_id),
|
||||
&bytes_count)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return bytes_count == sizeof(*thread_id);
|
||||
}
|
||||
|
||||
void ClientInfo::SetProcessUptime() {
|
||||
FILETIME now = {0};
|
||||
GetSystemTimeAsFileTime(&now);
|
||||
|
||||
ULARGE_INTEGER time_start;
|
||||
time_start.HighPart = start_time_.dwHighDateTime;
|
||||
time_start.LowPart = start_time_.dwLowDateTime;
|
||||
|
||||
ULARGE_INTEGER time_now;
|
||||
time_now.HighPart = now.dwHighDateTime;
|
||||
time_now.LowPart = now.dwLowDateTime;
|
||||
|
||||
// Calculate the delay and convert it from 100-nanoseconds to milliseconds.
|
||||
__int64 delay = (time_now.QuadPart - time_start.QuadPart) / 10 / 1000;
|
||||
|
||||
// Convert it to a string.
|
||||
wchar_t* value = custom_info_entries_.get()[custom_client_info_.count].value;
|
||||
_i64tow_s(delay, value, CustomInfoEntry::kValueMaxLength, 10);
|
||||
}
|
||||
|
||||
bool ClientInfo::PopulateCustomInfo() {
|
||||
if (custom_client_info_.count > kMaxCustomInfoEntries)
|
||||
return false;
|
||||
|
||||
SIZE_T bytes_count = 0;
|
||||
SIZE_T read_count = sizeof(CustomInfoEntry) * custom_client_info_.count;
|
||||
|
||||
// If the scoped array for custom info already has an array, it will be
|
||||
// the same size as what we need. This is because the number of custom info
|
||||
// entries is always the same. So allocate memory only if scoped array has
|
||||
// a NULL pointer.
|
||||
if (!custom_info_entries_.get()) {
|
||||
// Allocate an extra entry for reporting uptime for the client process.
|
||||
custom_info_entries_.reset(
|
||||
new CustomInfoEntry[custom_client_info_.count + 1]);
|
||||
// Use the last element in the array for uptime.
|
||||
custom_info_entries_.get()[custom_client_info_.count].set_name(
|
||||
kCustomInfoProcessUptimeName);
|
||||
}
|
||||
|
||||
if (!ReadProcessMemory(process_handle_,
|
||||
custom_client_info_.entries,
|
||||
custom_info_entries_.get(),
|
||||
read_count,
|
||||
&bytes_count)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
SetProcessUptime();
|
||||
return (bytes_count != read_count);
|
||||
}
|
||||
|
||||
CustomClientInfo ClientInfo::GetCustomInfo() const {
|
||||
CustomClientInfo custom_info;
|
||||
custom_info.entries = custom_info_entries_.get();
|
||||
// Add 1 to the count from the client process to account for extra entry for
|
||||
// process uptime.
|
||||
custom_info.count = custom_client_info_.count + 1;
|
||||
return custom_info;
|
||||
}
|
||||
|
||||
} // namespace google_breakpad
|
@ -0,0 +1,174 @@
|
||||
// Copyright (c) 2008, Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef CLIENT_WINDOWS_CRASH_GENERATION_CLIENT_INFO_H__
|
||||
#define CLIENT_WINDOWS_CRASH_GENERATION_CLIENT_INFO_H__
|
||||
|
||||
#include <Windows.h>
|
||||
#include <DbgHelp.h>
|
||||
#include "client/windows/common/ipc_protocol.h"
|
||||
#include "common/scoped_ptr.h"
|
||||
#include "google_breakpad/common/minidump_format.h"
|
||||
|
||||
namespace google_breakpad {
|
||||
|
||||
class CrashGenerationServer;
|
||||
|
||||
// Abstraction for a crash client process.
|
||||
class ClientInfo {
|
||||
public:
|
||||
// Creates an instance with the given values. Gets the process
|
||||
// handle for the given process id and creates necessary event
|
||||
// objects.
|
||||
ClientInfo(CrashGenerationServer* crash_server,
|
||||
DWORD pid,
|
||||
MINIDUMP_TYPE dump_type,
|
||||
DWORD* thread_id,
|
||||
EXCEPTION_POINTERS** ex_info,
|
||||
MDRawAssertionInfo* assert_info,
|
||||
const CustomClientInfo& custom_client_info);
|
||||
|
||||
~ClientInfo();
|
||||
|
||||
CrashGenerationServer* crash_server() const { return crash_server_; }
|
||||
DWORD pid() const { return pid_; }
|
||||
MINIDUMP_TYPE dump_type() const { return dump_type_; }
|
||||
EXCEPTION_POINTERS** ex_info() const { return ex_info_; }
|
||||
MDRawAssertionInfo* assert_info() const { return assert_info_; }
|
||||
DWORD* thread_id() const { return thread_id_; }
|
||||
HANDLE process_handle() const { return process_handle_; }
|
||||
HANDLE dump_requested_handle() const { return dump_requested_handle_; }
|
||||
HANDLE dump_generated_handle() const { return dump_generated_handle_; }
|
||||
DWORD crash_id() const { return crash_id_; }
|
||||
|
||||
void set_dump_request_wait_handle(HANDLE value) {
|
||||
dump_request_wait_handle_ = value;
|
||||
}
|
||||
|
||||
void set_process_exit_wait_handle(HANDLE value) {
|
||||
process_exit_wait_handle_ = value;
|
||||
}
|
||||
|
||||
// Unregister the dump request wait operation and wait for all callbacks
|
||||
// that might already be running to complete before returning.
|
||||
void UnregisterDumpRequestWaitAndBlockUntilNoPending();
|
||||
|
||||
// Unregister the process exit wait operation. If block_until_no_pending is
|
||||
// true, wait for all callbacks that might already be running to complete
|
||||
// before returning.
|
||||
void UnregisterProcessExitWait(bool block_until_no_pending);
|
||||
|
||||
bool Initialize();
|
||||
bool GetClientExceptionInfo(EXCEPTION_POINTERS** ex_info) const;
|
||||
bool GetClientThreadId(DWORD* thread_id) const;
|
||||
|
||||
// Reads the custom information from the client process address space.
|
||||
bool PopulateCustomInfo();
|
||||
|
||||
// Returns the client custom information.
|
||||
CustomClientInfo GetCustomInfo() const;
|
||||
|
||||
private:
|
||||
// Calcualtes the uptime for the client process, converts it to a string and
|
||||
// stores it in the last entry of client custom info.
|
||||
void SetProcessUptime();
|
||||
|
||||
// Crash generation server.
|
||||
CrashGenerationServer* crash_server_;
|
||||
|
||||
// Client process ID.
|
||||
DWORD pid_;
|
||||
|
||||
// Dump type requested by the client.
|
||||
MINIDUMP_TYPE dump_type_;
|
||||
|
||||
// Address of an EXCEPTION_POINTERS* variable in the client
|
||||
// process address space that will point to an instance of
|
||||
// EXCEPTION_POINTERS containing information about crash.
|
||||
//
|
||||
// WARNING: Do not dereference these pointers as they are pointers
|
||||
// in the address space of another process.
|
||||
EXCEPTION_POINTERS** ex_info_;
|
||||
|
||||
// Address of an instance of MDRawAssertionInfo in the client
|
||||
// process address space that will contain information about
|
||||
// non-exception related crashes like invalid parameter assertion
|
||||
// failures and pure calls.
|
||||
//
|
||||
// WARNING: Do not dereference these pointers as they are pointers
|
||||
// in the address space of another process.
|
||||
MDRawAssertionInfo* assert_info_;
|
||||
|
||||
// Custom information about the client.
|
||||
CustomClientInfo custom_client_info_;
|
||||
|
||||
// Contains the custom client info entries read from the client process
|
||||
// memory. This will be populated only if the method GetClientCustomInfo
|
||||
// is called.
|
||||
scoped_array<CustomInfoEntry> custom_info_entries_;
|
||||
|
||||
// Address of a variable in the client process address space that
|
||||
// will contain the thread id of the crashing client thread.
|
||||
//
|
||||
// WARNING: Do not dereference these pointers as they are pointers
|
||||
// in the address space of another process.
|
||||
DWORD* thread_id_;
|
||||
|
||||
// Client process handle.
|
||||
HANDLE process_handle_;
|
||||
|
||||
// Dump request event handle.
|
||||
HANDLE dump_requested_handle_;
|
||||
|
||||
// Dump generated event handle.
|
||||
HANDLE dump_generated_handle_;
|
||||
|
||||
// Wait handle for dump request event.
|
||||
HANDLE dump_request_wait_handle_;
|
||||
|
||||
// Wait handle for process exit event.
|
||||
HANDLE process_exit_wait_handle_;
|
||||
|
||||
// Time when the client process started. It is used to determine the uptime
|
||||
// for the client process when it signals a crash.
|
||||
FILETIME start_time_;
|
||||
|
||||
// The crash id which can be used to request an upload. This will be the
|
||||
// value of the low order dword of the process creation time for the process
|
||||
// being dumped.
|
||||
DWORD crash_id_;
|
||||
|
||||
// Disallow copy ctor and operator=.
|
||||
ClientInfo(const ClientInfo& client_info);
|
||||
ClientInfo& operator=(const ClientInfo& client_info);
|
||||
};
|
||||
|
||||
} // namespace google_breakpad
|
||||
|
||||
#endif // CLIENT_WINDOWS_CRASH_GENERATION_CLIENT_INFO_H__
|
@ -0,0 +1,64 @@
|
||||
# Copyright (c) 2010, Google Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
{
|
||||
'includes': [
|
||||
'../build/common.gypi',
|
||||
],
|
||||
'targets': [
|
||||
{
|
||||
'target_name': 'crash_generation_server',
|
||||
'type': 'static_library',
|
||||
'sources': [
|
||||
'client_info.cc',
|
||||
'crash_generation_server.cc',
|
||||
'minidump_generator.cc',
|
||||
'client_info.h',
|
||||
'crash_generation_client.h',
|
||||
'crash_generation_server.h',
|
||||
'minidump_generator.h',
|
||||
],
|
||||
'dependencies': [
|
||||
'../breakpad_client.gyp:common'
|
||||
],
|
||||
},
|
||||
{
|
||||
'target_name': 'crash_generation_client',
|
||||
'type': 'static_library',
|
||||
'include_dirs': [
|
||||
'<(DEPTH)',
|
||||
],
|
||||
'sources': [
|
||||
'crash_generation_client.h',
|
||||
'crash_generation_client.cc',
|
||||
'crash_generation_server.h',
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
@ -0,0 +1,405 @@
|
||||
// Copyright (c) 2008, Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#include "client/windows/crash_generation/crash_generation_client.h"
|
||||
#include <cassert>
|
||||
#include <utility>
|
||||
#include "client/windows/common/ipc_protocol.h"
|
||||
|
||||
namespace google_breakpad {
|
||||
|
||||
const int kPipeBusyWaitTimeoutMs = 2000;
|
||||
|
||||
#ifdef _DEBUG
|
||||
const DWORD kWaitForServerTimeoutMs = INFINITE;
|
||||
#else
|
||||
const DWORD kWaitForServerTimeoutMs = 15000;
|
||||
#endif
|
||||
|
||||
const int kPipeConnectMaxAttempts = 2;
|
||||
|
||||
const DWORD kPipeDesiredAccess = FILE_READ_DATA |
|
||||
FILE_WRITE_DATA |
|
||||
FILE_WRITE_ATTRIBUTES;
|
||||
|
||||
const DWORD kPipeFlagsAndAttributes = SECURITY_IDENTIFICATION |
|
||||
SECURITY_SQOS_PRESENT;
|
||||
|
||||
const DWORD kPipeMode = PIPE_READMODE_MESSAGE;
|
||||
|
||||
const size_t kWaitEventCount = 2;
|
||||
|
||||
// This function is orphan for production code. It can be used
|
||||
// for debugging to help repro some scenarios like the client
|
||||
// is slow in writing to the pipe after connecting, the client
|
||||
// is slow in reading from the pipe after writing, etc. The parameter
|
||||
// overlapped below is not used and it is present to match the signature
|
||||
// of this function to TransactNamedPipe Win32 API. Uncomment if needed
|
||||
// for debugging.
|
||||
/**
|
||||
static bool TransactNamedPipeDebugHelper(HANDLE pipe,
|
||||
const void* in_buffer,
|
||||
DWORD in_size,
|
||||
void* out_buffer,
|
||||
DWORD out_size,
|
||||
DWORD* bytes_count,
|
||||
LPOVERLAPPED) {
|
||||
// Uncomment the next sleep to create a gap before writing
|
||||
// to pipe.
|
||||
// Sleep(5000);
|
||||
|
||||
if (!WriteFile(pipe,
|
||||
in_buffer,
|
||||
in_size,
|
||||
bytes_count,
|
||||
NULL)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Uncomment the next sleep to create a gap between write
|
||||
// and read.
|
||||
// Sleep(5000);
|
||||
|
||||
return ReadFile(pipe, out_buffer, out_size, bytes_count, NULL) != FALSE;
|
||||
}
|
||||
**/
|
||||
|
||||
CrashGenerationClient::CrashGenerationClient(
|
||||
const wchar_t* pipe_name,
|
||||
MINIDUMP_TYPE dump_type,
|
||||
const CustomClientInfo* custom_info)
|
||||
: pipe_name_(pipe_name),
|
||||
pipe_handle_(NULL),
|
||||
dump_type_(dump_type),
|
||||
thread_id_(0),
|
||||
server_process_id_(0),
|
||||
crash_event_(NULL),
|
||||
crash_generated_(NULL),
|
||||
server_alive_(NULL),
|
||||
exception_pointers_(NULL),
|
||||
custom_info_() {
|
||||
memset(&assert_info_, 0, sizeof(assert_info_));
|
||||
if (custom_info) {
|
||||
custom_info_ = *custom_info;
|
||||
}
|
||||
}
|
||||
|
||||
CrashGenerationClient::CrashGenerationClient(
|
||||
HANDLE pipe_handle,
|
||||
MINIDUMP_TYPE dump_type,
|
||||
const CustomClientInfo* custom_info)
|
||||
: pipe_name_(),
|
||||
pipe_handle_(pipe_handle),
|
||||
dump_type_(dump_type),
|
||||
thread_id_(0),
|
||||
server_process_id_(0),
|
||||
crash_event_(NULL),
|
||||
crash_generated_(NULL),
|
||||
server_alive_(NULL),
|
||||
exception_pointers_(NULL),
|
||||
custom_info_() {
|
||||
memset(&assert_info_, 0, sizeof(assert_info_));
|
||||
if (custom_info) {
|
||||
custom_info_ = *custom_info;
|
||||
}
|
||||
}
|
||||
|
||||
CrashGenerationClient::~CrashGenerationClient() {
|
||||
if (crash_event_) {
|
||||
CloseHandle(crash_event_);
|
||||
}
|
||||
|
||||
if (crash_generated_) {
|
||||
CloseHandle(crash_generated_);
|
||||
}
|
||||
|
||||
if (server_alive_) {
|
||||
CloseHandle(server_alive_);
|
||||
}
|
||||
}
|
||||
|
||||
// Performs the registration step with the server process.
|
||||
// The registration step involves communicating with the server
|
||||
// via a named pipe. The client sends the following pieces of
|
||||
// data to the server:
|
||||
//
|
||||
// * Message tag indicating the client is requesting registration.
|
||||
// * Process id of the client process.
|
||||
// * Address of a DWORD variable in the client address space
|
||||
// that will contain the thread id of the client thread that
|
||||
// caused the crash.
|
||||
// * Address of an EXCEPTION_POINTERS* variable in the client
|
||||
// address space that will point to an instance of EXCEPTION_POINTERS
|
||||
// when the crash happens.
|
||||
// * Address of an instance of MDRawAssertionInfo that will contain
|
||||
// relevant information in case of non-exception crashes like assertion
|
||||
// failures and pure calls.
|
||||
//
|
||||
// In return the client expects the following information from the server:
|
||||
//
|
||||
// * Message tag indicating successful registration.
|
||||
// * Server process id.
|
||||
// * Handle to an object that client can signal to request dump
|
||||
// generation from the server.
|
||||
// * Handle to an object that client can wait on after requesting
|
||||
// dump generation for the server to finish dump generation.
|
||||
// * Handle to a mutex object that client can wait on to make sure
|
||||
// server is still alive.
|
||||
//
|
||||
// If any step of the expected behavior mentioned above fails, the
|
||||
// registration step is not considered successful and hence out-of-process
|
||||
// dump generation service is not available.
|
||||
//
|
||||
// Returns true if the registration is successful; false otherwise.
|
||||
bool CrashGenerationClient::Register() {
|
||||
if (IsRegistered()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
HANDLE pipe = ConnectToServer();
|
||||
if (!pipe) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool success = RegisterClient(pipe);
|
||||
CloseHandle(pipe);
|
||||
return success;
|
||||
}
|
||||
|
||||
bool CrashGenerationClient::RequestUpload(DWORD crash_id) {
|
||||
HANDLE pipe = ConnectToServer();
|
||||
if (!pipe) {
|
||||
return false;
|
||||
}
|
||||
|
||||
CustomClientInfo custom_info = {NULL, 0};
|
||||
ProtocolMessage msg(MESSAGE_TAG_UPLOAD_REQUEST, crash_id,
|
||||
static_cast<MINIDUMP_TYPE>(NULL), NULL, NULL, NULL,
|
||||
custom_info, NULL, NULL, NULL);
|
||||
DWORD bytes_count = 0;
|
||||
bool success = WriteFile(pipe, &msg, sizeof(msg), &bytes_count, NULL) != 0;
|
||||
|
||||
CloseHandle(pipe);
|
||||
return success;
|
||||
}
|
||||
|
||||
HANDLE CrashGenerationClient::ConnectToServer() {
|
||||
HANDLE pipe = ConnectToPipe(pipe_name_.c_str(),
|
||||
kPipeDesiredAccess,
|
||||
kPipeFlagsAndAttributes);
|
||||
if (!pipe) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
DWORD mode = kPipeMode;
|
||||
if (!SetNamedPipeHandleState(pipe, &mode, NULL, NULL)) {
|
||||
CloseHandle(pipe);
|
||||
pipe = NULL;
|
||||
}
|
||||
|
||||
return pipe;
|
||||
}
|
||||
|
||||
bool CrashGenerationClient::RegisterClient(HANDLE pipe) {
|
||||
ProtocolMessage msg(MESSAGE_TAG_REGISTRATION_REQUEST,
|
||||
GetCurrentProcessId(),
|
||||
dump_type_,
|
||||
&thread_id_,
|
||||
&exception_pointers_,
|
||||
&assert_info_,
|
||||
custom_info_,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL);
|
||||
ProtocolMessage reply;
|
||||
DWORD bytes_count = 0;
|
||||
// The call to TransactNamedPipe below can be changed to a call
|
||||
// to TransactNamedPipeDebugHelper to help repro some scenarios.
|
||||
// For details see comments for TransactNamedPipeDebugHelper.
|
||||
if (!TransactNamedPipe(pipe,
|
||||
&msg,
|
||||
sizeof(msg),
|
||||
&reply,
|
||||
sizeof(ProtocolMessage),
|
||||
&bytes_count,
|
||||
NULL)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!ValidateResponse(reply)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
ProtocolMessage ack_msg;
|
||||
ack_msg.tag = MESSAGE_TAG_REGISTRATION_ACK;
|
||||
|
||||
if (!WriteFile(pipe, &ack_msg, sizeof(ack_msg), &bytes_count, NULL)) {
|
||||
return false;
|
||||
}
|
||||
crash_event_ = reply.dump_request_handle;
|
||||
crash_generated_ = reply.dump_generated_handle;
|
||||
server_alive_ = reply.server_alive_handle;
|
||||
server_process_id_ = reply.id;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
HANDLE CrashGenerationClient::ConnectToPipe(const wchar_t* pipe_name,
|
||||
DWORD pipe_access,
|
||||
DWORD flags_attrs) {
|
||||
if (pipe_handle_) {
|
||||
HANDLE t = pipe_handle_;
|
||||
pipe_handle_ = NULL;
|
||||
return t;
|
||||
}
|
||||
|
||||
for (int i = 0; i < kPipeConnectMaxAttempts; ++i) {
|
||||
HANDLE pipe = CreateFile(pipe_name,
|
||||
pipe_access,
|
||||
0,
|
||||
NULL,
|
||||
OPEN_EXISTING,
|
||||
flags_attrs,
|
||||
NULL);
|
||||
if (pipe != INVALID_HANDLE_VALUE) {
|
||||
return pipe;
|
||||
}
|
||||
|
||||
// Cannot continue retrying if error is something other than
|
||||
// ERROR_PIPE_BUSY.
|
||||
if (GetLastError() != ERROR_PIPE_BUSY) {
|
||||
break;
|
||||
}
|
||||
|
||||
// Cannot continue retrying if wait on pipe fails.
|
||||
if (!WaitNamedPipe(pipe_name, kPipeBusyWaitTimeoutMs)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bool CrashGenerationClient::ValidateResponse(
|
||||
const ProtocolMessage& msg) const {
|
||||
return (msg.tag == MESSAGE_TAG_REGISTRATION_RESPONSE) &&
|
||||
(msg.id != 0) &&
|
||||
(msg.dump_request_handle != NULL) &&
|
||||
(msg.dump_generated_handle != NULL) &&
|
||||
(msg.server_alive_handle != NULL);
|
||||
}
|
||||
|
||||
bool CrashGenerationClient::IsRegistered() const {
|
||||
return crash_event_ != NULL;
|
||||
}
|
||||
|
||||
bool CrashGenerationClient::RequestDump(EXCEPTION_POINTERS* ex_info,
|
||||
MDRawAssertionInfo* assert_info) {
|
||||
if (!IsRegistered()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
exception_pointers_ = ex_info;
|
||||
thread_id_ = GetCurrentThreadId();
|
||||
|
||||
if (assert_info) {
|
||||
memcpy(&assert_info_, assert_info, sizeof(assert_info_));
|
||||
} else {
|
||||
memset(&assert_info_, 0, sizeof(assert_info_));
|
||||
}
|
||||
|
||||
return SignalCrashEventAndWait();
|
||||
}
|
||||
|
||||
bool CrashGenerationClient::RequestDump(EXCEPTION_POINTERS* ex_info) {
|
||||
return RequestDump(ex_info, NULL);
|
||||
}
|
||||
|
||||
bool CrashGenerationClient::RequestDump(MDRawAssertionInfo* assert_info) {
|
||||
return RequestDump(NULL, assert_info);
|
||||
}
|
||||
|
||||
bool CrashGenerationClient::SignalCrashEventAndWait() {
|
||||
assert(crash_event_);
|
||||
assert(crash_generated_);
|
||||
assert(server_alive_);
|
||||
|
||||
// Reset the dump generated event before signaling the crash
|
||||
// event so that the server can set the dump generated event
|
||||
// once it is done generating the event.
|
||||
if (!ResetEvent(crash_generated_)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!SetEvent(crash_event_)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
HANDLE wait_handles[kWaitEventCount] = {crash_generated_, server_alive_};
|
||||
|
||||
DWORD result = WaitForMultipleObjects(kWaitEventCount,
|
||||
wait_handles,
|
||||
FALSE,
|
||||
kWaitForServerTimeoutMs);
|
||||
|
||||
// Crash dump was successfully generated only if the server
|
||||
// signaled the crash generated event.
|
||||
return result == WAIT_OBJECT_0;
|
||||
}
|
||||
|
||||
HANDLE CrashGenerationClient::DuplicatePipeToClientProcess(const wchar_t* pipe_name,
|
||||
HANDLE hProcess) {
|
||||
for (int i = 0; i < kPipeConnectMaxAttempts; ++i) {
|
||||
HANDLE local_pipe = CreateFile(pipe_name, kPipeDesiredAccess,
|
||||
0, NULL, OPEN_EXISTING,
|
||||
kPipeFlagsAndAttributes, NULL);
|
||||
if (local_pipe != INVALID_HANDLE_VALUE) {
|
||||
HANDLE remotePipe = INVALID_HANDLE_VALUE;
|
||||
if (DuplicateHandle(GetCurrentProcess(), local_pipe,
|
||||
hProcess, &remotePipe, 0, FALSE,
|
||||
DUPLICATE_CLOSE_SOURCE | DUPLICATE_SAME_ACCESS)) {
|
||||
return remotePipe;
|
||||
} else {
|
||||
return INVALID_HANDLE_VALUE;
|
||||
}
|
||||
}
|
||||
|
||||
// Cannot continue retrying if the error wasn't a busy pipe.
|
||||
if (GetLastError() != ERROR_PIPE_BUSY) {
|
||||
return INVALID_HANDLE_VALUE;
|
||||
}
|
||||
|
||||
if (!WaitNamedPipe(pipe_name, kPipeBusyWaitTimeoutMs)) {
|
||||
return INVALID_HANDLE_VALUE;
|
||||
}
|
||||
}
|
||||
return INVALID_HANDLE_VALUE;
|
||||
}
|
||||
|
||||
} // namespace google_breakpad
|
@ -0,0 +1,182 @@
|
||||
// Copyright (c) 2008, Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef CLIENT_WINDOWS_CRASH_GENERATION_CRASH_GENERATION_CLIENT_H_
|
||||
#define CLIENT_WINDOWS_CRASH_GENERATION_CRASH_GENERATION_CLIENT_H_
|
||||
|
||||
#include <windows.h>
|
||||
#include <dbghelp.h>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include "client/windows/common/ipc_protocol.h"
|
||||
#include "common/scoped_ptr.h"
|
||||
|
||||
namespace google_breakpad {
|
||||
|
||||
struct CustomClientInfo;
|
||||
|
||||
// Abstraction of client-side implementation of out of process
|
||||
// crash generation.
|
||||
//
|
||||
// The process that desires to have out-of-process crash dump
|
||||
// generation service can use this class in the following way:
|
||||
//
|
||||
// * Create an instance.
|
||||
// * Call Register method so that the client tries to register
|
||||
// with the server process and check the return value. If
|
||||
// registration is not successful, out-of-process crash dump
|
||||
// generation will not be available
|
||||
// * Request dump generation by calling either of the two
|
||||
// overloaded RequestDump methods - one in case of exceptions
|
||||
// and the other in case of assertion failures
|
||||
//
|
||||
// Note that it is the responsibility of the client code of
|
||||
// this class to set the unhandled exception filter with the
|
||||
// system by calling the SetUnhandledExceptionFilter function
|
||||
// and the client code should explicitly request dump generation.
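//
// A minimal usage sketch (illustrative only; the pipe name below is a
// placeholder and must match the name the crash server listens on):
//
//   CustomClientInfo custom_info = {NULL, 0};
//   CrashGenerationClient client(L"\\\\.\\pipe\\MyCrashServerPipe",
//                                MiniDumpNormal,
//                                &custom_info);
//   if (client.Register()) {
//     // Typically invoked later from an unhandled exception filter:
//     // client.RequestDump(exception_pointers);
//   }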
|
||||
class CrashGenerationClient {
|
||||
public:
|
||||
CrashGenerationClient(const wchar_t* pipe_name,
|
||||
MINIDUMP_TYPE dump_type,
|
||||
const CustomClientInfo* custom_info);
|
||||
|
||||
CrashGenerationClient(HANDLE pipe_handle,
|
||||
MINIDUMP_TYPE dump_type,
|
||||
const CustomClientInfo* custom_info);
|
||||
|
||||
~CrashGenerationClient();
|
||||
|
||||
// Registers the client process with the crash server.
|
||||
//
|
||||
// Returns true if the registration is successful; false otherwise.
|
||||
bool Register();
|
||||
|
||||
// Requests the crash server to upload a previous dump with the
|
||||
// given crash id.
|
||||
bool RequestUpload(DWORD crash_id);
|
||||
|
||||
bool RequestDump(EXCEPTION_POINTERS* ex_info,
|
||||
MDRawAssertionInfo* assert_info);
|
||||
|
||||
// Requests the crash server to generate a dump with the given
|
||||
// exception information.
|
||||
//
|
||||
// Returns true if the dump was successful; false otherwise. Note that
|
||||
// if the registration step was not performed or it was not successful,
|
||||
// false will be returned.
|
||||
bool RequestDump(EXCEPTION_POINTERS* ex_info);
|
||||
|
||||
// Requests the crash server to generate a dump with the given
|
||||
// assertion information.
|
||||
//
|
||||
// Returns true if the dump was successful; false otherwise. Note that
|
||||
// if the registration step was not performed or it was not successful,
|
||||
// false will be returned.
|
||||
bool RequestDump(MDRawAssertionInfo* assert_info);
|
||||
|
||||
// If the crash generation client is running in a sandbox that prevents it
|
||||
// from opening the named pipe directly, the server process may open the
|
||||
// handle and duplicate it into the client process with this helper method.
|
||||
// Returns INVALID_HANDLE_VALUE on failure. The process must have been opened
|
||||
// with the PROCESS_DUP_HANDLE access right.
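// Illustrative sketch of the sandboxed flow described above (names are
// placeholders): a broker process that can reach the pipe duplicates the
// handle into the sandboxed child, and the child then uses the pipe-handle
// constructor instead of the pipe-name constructor:
//
//   HANDLE pipe_in_child =
//       CrashGenerationClient::DuplicatePipeToClientProcess(
//           L"\\\\.\\pipe\\MyCrashServerPipe", child_process_handle);
//   // ... pass the handle value to the child, which then constructs:
//   // CrashGenerationClient client(pipe_in_child, MiniDumpNormal, NULL);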
|
||||
static HANDLE DuplicatePipeToClientProcess(const wchar_t* pipe_name,
|
||||
HANDLE hProcess);
|
||||
|
||||
private:
|
||||
// Connects to the appropriate pipe and sets the pipe handle state.
|
||||
//
|
||||
// Returns the pipe handle if everything goes well; otherwise returns NULL.
|
||||
HANDLE ConnectToServer();
|
||||
|
||||
// Performs a handshake with the server over the given pipe which should be
|
||||
// already connected to the server.
|
||||
//
|
||||
// Returns true if handshake with the server was successful; false otherwise.
|
||||
bool RegisterClient(HANDLE pipe);
|
||||
|
||||
// Validates the given server response.
|
||||
bool ValidateResponse(const ProtocolMessage& msg) const;
|
||||
|
||||
// Returns true if the registration step succeeded; false otherwise.
|
||||
bool IsRegistered() const;
|
||||
|
||||
// Connects to the given named pipe with given parameters.
|
||||
//
|
||||
// Returns the pipe handle if the connection is successful; NULL otherwise.
|
||||
HANDLE ConnectToPipe(const wchar_t* pipe_name,
|
||||
DWORD pipe_access,
|
||||
DWORD flags_attrs);
|
||||
|
||||
// Signals the crash event and waits for the server to generate a crash dump.
|
||||
bool SignalCrashEventAndWait();
|
||||
|
||||
// Pipe name to use to talk to server.
|
||||
std::wstring pipe_name_;
|
||||
|
||||
// Pipe handle duplicated from server process. Only valid before
|
||||
// Register is called.
|
||||
HANDLE pipe_handle_;
|
||||
|
||||
// Custom client information
|
||||
CustomClientInfo custom_info_;
|
||||
|
||||
// Type of dump to generate.
|
||||
MINIDUMP_TYPE dump_type_;
|
||||
|
||||
// Event to signal in case of a crash.
|
||||
HANDLE crash_event_;
|
||||
|
||||
// Handle to wait on after signaling a crash for the server
|
||||
// to finish generating crash dump.
|
||||
HANDLE crash_generated_;
|
||||
|
||||
// Handle to a mutex that will become signaled with WAIT_ABANDONED
|
||||
// if the server process goes down.
|
||||
HANDLE server_alive_;
|
||||
|
||||
// Server process id.
|
||||
DWORD server_process_id_;
|
||||
|
||||
// Id of the thread that caused the crash.
|
||||
DWORD thread_id_;
|
||||
|
||||
// Exception pointers for an exception crash.
|
||||
EXCEPTION_POINTERS* exception_pointers_;
|
||||
|
||||
// Assertion info for an invalid parameter or pure call crash.
|
||||
MDRawAssertionInfo assert_info_;
|
||||
|
||||
// Disable copy ctor and operator=.
|
||||
CrashGenerationClient(const CrashGenerationClient& crash_client);
|
||||
CrashGenerationClient& operator=(const CrashGenerationClient& crash_client);
|
||||
};
|
||||
|
||||
} // namespace google_breakpad
|
||||
|
||||
#endif // CLIENT_WINDOWS_CRASH_GENERATION_CRASH_GENERATION_CLIENT_H_
|
@ -0,0 +1,935 @@
|
||||
// Copyright (c) 2008, Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#include "client/windows/crash_generation/crash_generation_server.h"
|
||||
#include <windows.h>
|
||||
#include <cassert>
|
||||
#include <list>
|
||||
#include "client/windows/common/auto_critical_section.h"
|
||||
#include "common/scoped_ptr.h"
|
||||
|
||||
#include "client/windows/crash_generation/client_info.h"
|
||||
|
||||
namespace google_breakpad {
|
||||
|
||||
// Output buffer size.
|
||||
static const size_t kOutBufferSize = 64;
|
||||
|
||||
// Input buffer size.
|
||||
static const size_t kInBufferSize = 64;
|
||||
|
||||
// Access flags for the client on the dump request event.
|
||||
static const DWORD kDumpRequestEventAccess = EVENT_MODIFY_STATE;
|
||||
|
||||
// Access flags for the client on the dump generated event.
|
||||
static const DWORD kDumpGeneratedEventAccess = EVENT_MODIFY_STATE |
|
||||
SYNCHRONIZE;
|
||||
|
||||
// Access flags for the client on the mutex.
|
||||
static const DWORD kMutexAccess = SYNCHRONIZE;
|
||||
|
||||
// Attribute flags for the pipe.
|
||||
static const DWORD kPipeAttr = FILE_FLAG_FIRST_PIPE_INSTANCE |
|
||||
PIPE_ACCESS_DUPLEX |
|
||||
FILE_FLAG_OVERLAPPED;
|
||||
|
||||
// Mode for the pipe.
|
||||
static const DWORD kPipeMode = PIPE_TYPE_MESSAGE |
|
||||
PIPE_READMODE_MESSAGE |
|
||||
PIPE_WAIT;
|
||||
|
||||
// For pipe I/O, execute the callback in the wait thread itself,
|
||||
// since the callback does very little work. The callback executes
|
||||
// the code for one of the states of the server state machine and
|
||||
// the code for all of the states performs async I/O and hence
|
||||
// finishes very quickly.
|
||||
static const ULONG kPipeIOThreadFlags = WT_EXECUTEINWAITTHREAD;
|
||||
|
||||
// Dump request threads will, most likely, generate dumps. That may
|
||||
// take some time to finish, so specify WT_EXECUTELONGFUNCTION flag.
|
||||
static const ULONG kDumpRequestThreadFlags = WT_EXECUTEINWAITTHREAD |
|
||||
WT_EXECUTELONGFUNCTION;
|
||||
|
||||
static bool IsClientRequestValid(const ProtocolMessage& msg) {
|
||||
return msg.tag == MESSAGE_TAG_UPLOAD_REQUEST ||
|
||||
(msg.tag == MESSAGE_TAG_REGISTRATION_REQUEST &&
|
||||
msg.id != 0 &&
|
||||
msg.thread_id != NULL &&
|
||||
msg.exception_pointers != NULL &&
|
||||
msg.assert_info != NULL);
|
||||
}
|
||||
|
||||
CrashGenerationServer::CrashGenerationServer(
|
||||
const std::wstring& pipe_name,
|
||||
SECURITY_ATTRIBUTES* pipe_sec_attrs,
|
||||
OnClientConnectedCallback connect_callback,
|
||||
void* connect_context,
|
||||
OnClientDumpRequestCallback dump_callback,
|
||||
void* dump_context,
|
||||
OnClientExitedCallback exit_callback,
|
||||
void* exit_context,
|
||||
OnClientUploadRequestCallback upload_request_callback,
|
||||
void* upload_context,
|
||||
bool generate_dumps,
|
||||
const std::wstring* dump_path)
|
||||
: pipe_name_(pipe_name),
|
||||
pipe_sec_attrs_(pipe_sec_attrs),
|
||||
pipe_(NULL),
|
||||
pipe_wait_handle_(NULL),
|
||||
server_alive_handle_(NULL),
|
||||
connect_callback_(connect_callback),
|
||||
connect_context_(connect_context),
|
||||
dump_callback_(dump_callback),
|
||||
dump_context_(dump_context),
|
||||
exit_callback_(exit_callback),
|
||||
exit_context_(exit_context),
|
||||
upload_request_callback_(upload_request_callback),
|
||||
upload_context_(upload_context),
|
||||
generate_dumps_(generate_dumps),
|
||||
dump_generator_(NULL),
|
||||
server_state_(IPC_SERVER_STATE_UNINITIALIZED),
|
||||
shutting_down_(false),
|
||||
overlapped_(),
|
||||
client_info_(NULL) {
|
||||
InitializeCriticalSection(&sync_);
|
||||
|
||||
if (dump_path) {
|
||||
dump_generator_.reset(new MinidumpGenerator(*dump_path));
|
||||
}
|
||||
}
|
||||
|
||||
// This should never be called from the OnPipeConnected callback.
|
||||
// Otherwise the UnregisterWaitEx call below will cause a deadlock.
|
||||
CrashGenerationServer::~CrashGenerationServer() {
|
||||
// New scope to release the lock automatically.
|
||||
{
|
||||
// Make sure no clients are added or removed beyond this point.
|
||||
// Before adding or removing any clients, the critical section
|
||||
// must be entered and the shutting_down_ flag checked. The
|
||||
// critical section is then exited only after the clients_ list
|
||||
// modifications are done and the list is in a consistent state.
|
||||
AutoCriticalSection lock(&sync_);
|
||||
|
||||
// Indicate to existing threads that server is shutting down.
|
||||
shutting_down_ = true;
|
||||
}
|
||||
// No one will modify the clients_ list beyond this point -
|
||||
// not even from another thread.
|
||||
|
||||
// Even if there are no current worker threads running, it is possible that
|
||||
// an I/O request is pending on the pipe right now but not yet done.
|
||||
// In fact, it's very likely this is the case unless we are in an ERROR
|
||||
// state. If we don't wait for the pending I/O to be done, then when the I/O
|
||||
// completes, it may write to invalid memory. AppVerifier will flag this
|
||||
// problem too. So we disconnect from the pipe and then wait for the server
|
||||
// to get into error state so that the pending I/O will fail and get
|
||||
// cleared.
|
||||
DisconnectNamedPipe(pipe_);
|
||||
int num_tries = 100;
|
||||
while (num_tries-- && server_state_ != IPC_SERVER_STATE_ERROR) {
|
||||
Sleep(10);
|
||||
}
|
||||
|
||||
// Unregister wait on the pipe.
|
||||
if (pipe_wait_handle_) {
|
||||
// Wait for already executing callbacks to finish.
|
||||
UnregisterWaitEx(pipe_wait_handle_, INVALID_HANDLE_VALUE);
|
||||
}
|
||||
|
||||
// Close the pipe to avoid further client connections.
|
||||
if (pipe_) {
|
||||
CloseHandle(pipe_);
|
||||
}
|
||||
|
||||
// Request all ClientInfo objects to unregister all waits.
|
||||
// No need to enter the critical section because no one is allowed to modify
|
||||
// the clients_ list once the shutting_down_ flag is set.
|
||||
std::list<ClientInfo*>::iterator iter;
|
||||
for (iter = clients_.begin(); iter != clients_.end(); ++iter) {
|
||||
ClientInfo* client_info = *iter;
|
||||
// Unregister waits. Wait for already executing callbacks to finish.
|
||||
// Unregister the client process exit wait first and only then unregister
|
||||
// the dump request wait. The reason is that the OnClientExit callback
|
||||
// also unregisters the dump request wait and such a race (doing the same
|
||||
// unregistration from two threads) is undesirable.
|
||||
client_info->UnregisterProcessExitWait(true);
|
||||
client_info->UnregisterDumpRequestWaitAndBlockUntilNoPending();
|
||||
|
||||
// Destroying the ClientInfo here is safe because all wait operations for
|
||||
// this ClientInfo were unregistered and no pending or running callbacks
|
||||
// for this ClientInfo can possibly exist (block_until_no_pending option
|
||||
// was used).
|
||||
delete client_info;
|
||||
}
|
||||
|
||||
if (server_alive_handle_) {
|
||||
// Release the mutex before closing the handle so that clients requesting
|
||||
// dumps don't wait for a long time for the server to generate a dump.
|
||||
ReleaseMutex(server_alive_handle_);
|
||||
CloseHandle(server_alive_handle_);
|
||||
}
|
||||
|
||||
if (overlapped_.hEvent) {
|
||||
CloseHandle(overlapped_.hEvent);
|
||||
}
|
||||
|
||||
DeleteCriticalSection(&sync_);
|
||||
}
|
||||
|
||||
bool CrashGenerationServer::Start() {
|
||||
if (server_state_ != IPC_SERVER_STATE_UNINITIALIZED) {
|
||||
return false;
|
||||
}
|
||||
|
||||
server_state_ = IPC_SERVER_STATE_INITIAL;
|
||||
|
||||
server_alive_handle_ = CreateMutex(NULL, TRUE, NULL);
|
||||
if (!server_alive_handle_) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Event to signal the client connection and pipe reads and writes.
|
||||
overlapped_.hEvent = CreateEvent(NULL, // Security descriptor.
|
||||
TRUE, // Manual reset.
|
||||
FALSE, // Initially nonsignaled.
|
||||
NULL); // Name.
|
||||
if (!overlapped_.hEvent) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Register a callback with the thread pool for the client connection.
|
||||
if (!RegisterWaitForSingleObject(&pipe_wait_handle_,
|
||||
overlapped_.hEvent,
|
||||
OnPipeConnected,
|
||||
this,
|
||||
INFINITE,
|
||||
kPipeIOThreadFlags)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
pipe_ = CreateNamedPipe(pipe_name_.c_str(),
|
||||
kPipeAttr,
|
||||
kPipeMode,
|
||||
1,
|
||||
kOutBufferSize,
|
||||
kInBufferSize,
|
||||
0,
|
||||
pipe_sec_attrs_);
|
||||
if (pipe_ == INVALID_HANDLE_VALUE) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Kick-start the state machine. This will initiate an asynchronous wait
|
||||
// for client connections.
|
||||
if (!SetEvent(overlapped_.hEvent)) {
|
||||
server_state_ = IPC_SERVER_STATE_ERROR;
|
||||
return false;
|
||||
}
|
||||
|
||||
// If we are in error state, it's because we failed to start listening.
|
||||
return true;
|
||||
}
|
||||
|
||||
// If the server thread serving clients ever gets into the
|
||||
// ERROR state, reset the event, close the pipe and remain
|
||||
// in the error state forever. Error state means something
|
||||
// that we didn't account for has happened, and it's dangerous
|
||||
// to do anything unknowingly.
|
||||
void CrashGenerationServer::HandleErrorState() {
|
||||
assert(server_state_ == IPC_SERVER_STATE_ERROR);
|
||||
|
||||
// If the server is shutting down anyway, don't clean up
|
||||
// here since shut down process will clean up.
|
||||
if (shutting_down_) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (pipe_wait_handle_) {
|
||||
UnregisterWait(pipe_wait_handle_);
|
||||
pipe_wait_handle_ = NULL;
|
||||
}
|
||||
|
||||
if (pipe_) {
|
||||
CloseHandle(pipe_);
|
||||
pipe_ = NULL;
|
||||
}
|
||||
|
||||
if (overlapped_.hEvent) {
|
||||
CloseHandle(overlapped_.hEvent);
|
||||
overlapped_.hEvent = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
// When the server thread serving clients is in the INITIAL state,
|
||||
// try to connect to the pipe asynchronously. If the connection
|
||||
// finishes synchronously, directly go into the CONNECTED state;
|
||||
// otherwise go into the CONNECTING state. For any problems, go
|
||||
// into the ERROR state.
|
||||
void CrashGenerationServer::HandleInitialState() {
|
||||
assert(server_state_ == IPC_SERVER_STATE_INITIAL);
|
||||
|
||||
if (!ResetEvent(overlapped_.hEvent)) {
|
||||
EnterErrorState();
|
||||
return;
|
||||
}
|
||||
|
||||
bool success = ConnectNamedPipe(pipe_, &overlapped_) != FALSE;
|
||||
DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
|
||||
|
||||
// From MSDN, it is not clear whether ConnectNamedPipe, when used
|
||||
// in overlapped mode, can ever return a non-zero value, and
|
||||
// if so, in which cases.
|
||||
assert(!success);
|
||||
|
||||
switch (error_code) {
|
||||
case ERROR_IO_PENDING:
|
||||
EnterStateWhenSignaled(IPC_SERVER_STATE_CONNECTING);
|
||||
break;
|
||||
|
||||
case ERROR_PIPE_CONNECTED:
|
||||
EnterStateImmediately(IPC_SERVER_STATE_CONNECTED);
|
||||
break;
|
||||
|
||||
default:
|
||||
EnterErrorState();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// When the server thread serving the clients is in the CONNECTING state,
|
||||
// try to get the result of the asynchronous connection request using
|
||||
// the OVERLAPPED object. If the result indicates the connection is done,
|
||||
// go into the CONNECTED state. If the result indicates I/O is still
|
||||
// INCOMPLETE, remain in the CONNECTING state. For any problems,
|
||||
// go into the DISCONNECTING state.
|
||||
void CrashGenerationServer::HandleConnectingState() {
|
||||
assert(server_state_ == IPC_SERVER_STATE_CONNECTING);
|
||||
|
||||
DWORD bytes_count = 0;
|
||||
bool success = GetOverlappedResult(pipe_,
|
||||
&overlapped_,
|
||||
&bytes_count,
|
||||
FALSE) != FALSE;
|
||||
DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
|
||||
|
||||
if (success) {
|
||||
EnterStateImmediately(IPC_SERVER_STATE_CONNECTED);
|
||||
} else if (error_code != ERROR_IO_INCOMPLETE) {
|
||||
EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
|
||||
} else {
|
||||
// remain in CONNECTING state
|
||||
}
|
||||
}
|
||||
|
||||
// When the server thread serving the clients is in the CONNECTED state,
|
||||
// try to issue an asynchronous read from the pipe. If read completes
|
||||
// synchronously or if I/O is pending then go into the READING state.
|
||||
// For any problems, go into the DISCONNECTING state.
|
||||
void CrashGenerationServer::HandleConnectedState() {
|
||||
assert(server_state_ == IPC_SERVER_STATE_CONNECTED);
|
||||
|
||||
DWORD bytes_count = 0;
|
||||
memset(&msg_, 0, sizeof(msg_));
|
||||
bool success = ReadFile(pipe_,
|
||||
&msg_,
|
||||
sizeof(msg_),
|
||||
&bytes_count,
|
||||
&overlapped_) != FALSE;
|
||||
DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
|
||||
|
||||
// Note that the asynchronous read issued above can finish before the
|
||||
// code below executes. But, it is okay to change state after issuing
|
||||
// the asynchronous read. This is because even if the asynchronous read
|
||||
// is done, the callback for it would not be executed until the current
|
||||
// thread finishes its execution.
|
||||
if (success || error_code == ERROR_IO_PENDING) {
|
||||
EnterStateWhenSignaled(IPC_SERVER_STATE_READING);
|
||||
} else {
|
||||
EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
|
||||
}
|
||||
}
|
||||
|
||||
// When the server thread serving the clients is in the READING state,
|
||||
// try to get the result of the async read. If async read is done,
|
||||
// go into the READ_DONE state. For any problems, go into the
|
||||
// DISCONNECTING state.
|
||||
void CrashGenerationServer::HandleReadingState() {
|
||||
assert(server_state_ == IPC_SERVER_STATE_READING);
|
||||
|
||||
DWORD bytes_count = 0;
|
||||
bool success = GetOverlappedResult(pipe_,
|
||||
&overlapped_,
|
||||
&bytes_count,
|
||||
FALSE) != FALSE;
|
||||
DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
|
||||
|
||||
if (success && bytes_count == sizeof(ProtocolMessage)) {
|
||||
EnterStateImmediately(IPC_SERVER_STATE_READ_DONE);
|
||||
} else {
|
||||
// We should never get an I/O incomplete since we should not execute this
|
||||
// unless the Read has finished and the overlapped event is signaled. If
|
||||
// we do get INCOMPLETE, we have a bug in our code.
|
||||
assert(error_code != ERROR_IO_INCOMPLETE);
|
||||
|
||||
EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
|
||||
}
|
||||
}
|
||||
|
||||
// When the server thread serving the client is in the READ_DONE state,
|
||||
// validate the client's request message, register the client by
|
||||
// creating appropriate objects and prepare the response. Then try to
|
||||
// write the response to the pipe asynchronously. If that succeeds,
|
||||
// go into the WRITING state. For any problems, go into the DISCONNECTING
|
||||
// state.
|
||||
void CrashGenerationServer::HandleReadDoneState() {
|
||||
assert(server_state_ == IPC_SERVER_STATE_READ_DONE);
|
||||
|
||||
if (!IsClientRequestValid(msg_)) {
|
||||
EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
|
||||
return;
|
||||
}
|
||||
|
||||
if (msg_.tag == MESSAGE_TAG_UPLOAD_REQUEST) {
|
||||
if (upload_request_callback_)
|
||||
upload_request_callback_(upload_context_, msg_.id);
|
||||
EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
|
||||
return;
|
||||
}
|
||||
|
||||
scoped_ptr<ClientInfo> client_info(
|
||||
new ClientInfo(this,
|
||||
msg_.id,
|
||||
msg_.dump_type,
|
||||
msg_.thread_id,
|
||||
msg_.exception_pointers,
|
||||
msg_.assert_info,
|
||||
msg_.custom_client_info));
|
||||
|
||||
if (!client_info->Initialize()) {
|
||||
EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
|
||||
return;
|
||||
}
|
||||
|
||||
// Issues an asynchronous WriteFile call if successful.
|
||||
// Iff successful, assigns ownership of the client_info pointer to the server
|
||||
// instance, in which case we must be sure not to free it in this function.
|
||||
if (!RespondToClient(client_info.get())) {
|
||||
EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
|
||||
return;
|
||||
}
|
||||
|
||||
// This is only valid as long as it can be found in the clients_ list
|
||||
client_info_ = client_info.release();
|
||||
|
||||
// Note that the asynchronous write issued by RespondToClient function
|
||||
// can finish before the code below executes. But it is okay to change
|
||||
// state after issuing the asynchronous write. This is because even if
|
||||
// the asynchronous write is done, the callback for it would not be
|
||||
// executed until the current thread finishes its execution.
|
||||
EnterStateWhenSignaled(IPC_SERVER_STATE_WRITING);
|
||||
}
|
||||
|
||||
// When the server thread serving the clients is in the WRITING state,
|
||||
// try to get the result of the async write. If the async write is done,
|
||||
// go into the WRITE_DONE state. For any problems, go into the
|
||||
// DISCONNECTING state.
|
||||
void CrashGenerationServer::HandleWritingState() {
|
||||
assert(server_state_ == IPC_SERVER_STATE_WRITING);
|
||||
|
||||
DWORD bytes_count = 0;
|
||||
bool success = GetOverlappedResult(pipe_,
|
||||
&overlapped_,
|
||||
&bytes_count,
|
||||
FALSE) != FALSE;
|
||||
DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
|
||||
|
||||
if (success) {
|
||||
EnterStateImmediately(IPC_SERVER_STATE_WRITE_DONE);
|
||||
return;
|
||||
}
|
||||
|
||||
// We should never get an I/O incomplete since we should not execute this
|
||||
// unless the Write has finished and the overlapped event is signaled. If
|
||||
// we do get INCOMPLETE, we have a bug in our code.
|
||||
assert(error_code != ERROR_IO_INCOMPLETE);
|
||||
|
||||
EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
|
||||
}
|
||||
|
||||
// When the server thread serving the clients is in the WRITE_DONE state,
|
||||
// try to issue an async read on the pipe. If the read completes synchronously
|
||||
// or if I/O is still pending then go into the READING_ACK state. For any
|
||||
// issues, go into the DISCONNECTING state.
|
||||
void CrashGenerationServer::HandleWriteDoneState() {
|
||||
assert(server_state_ == IPC_SERVER_STATE_WRITE_DONE);
|
||||
|
||||
DWORD bytes_count = 0;
|
||||
bool success = ReadFile(pipe_,
|
||||
&msg_,
|
||||
sizeof(msg_),
|
||||
&bytes_count,
|
||||
&overlapped_) != FALSE;
|
||||
DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
|
||||
|
||||
if (success) {
|
||||
EnterStateImmediately(IPC_SERVER_STATE_READING_ACK);
|
||||
} else if (error_code == ERROR_IO_PENDING) {
|
||||
EnterStateWhenSignaled(IPC_SERVER_STATE_READING_ACK);
|
||||
} else {
|
||||
EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
|
||||
}
|
||||
}
|
||||
|
||||
// When the server thread serving the clients is in the READING_ACK state,
|
||||
// try to get result of async read. Go into the DISCONNECTING state.
|
||||
void CrashGenerationServer::HandleReadingAckState() {
|
||||
assert(server_state_ == IPC_SERVER_STATE_READING_ACK);
|
||||
|
||||
DWORD bytes_count = 0;
|
||||
bool success = GetOverlappedResult(pipe_,
|
||||
&overlapped_,
|
||||
&bytes_count,
|
||||
FALSE) != FALSE;
|
||||
DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
|
||||
|
||||
if (success) {
|
||||
// The connection handshake with the client is now complete; perform
|
||||
// the callback.
|
||||
if (connect_callback_) {
|
||||
// Note that there is only a single copy of the ClientInfo of the
|
||||
// currently connected client. However it is being referenced from
|
||||
// two different places:
|
||||
// - the client_info_ member
|
||||
// - the clients_ list
|
||||
// The lifetime of this ClientInfo depends on the lifetime of the
|
||||
// client process - basically it can go away at any time.
|
||||
// However, as long as it is referenced by the clients_ list it
|
||||
// is guaranteed to be valid. Enter the critical section and check
|
||||
// to see whether the client_info_ can be found in the list.
|
||||
// If found, execute the callback and only then leave the critical
|
||||
// section.
|
||||
AutoCriticalSection lock(&sync_);
|
||||
|
||||
bool client_is_still_alive = false;
|
||||
std::list<ClientInfo*>::iterator iter;
|
||||
for (iter = clients_.begin(); iter != clients_.end(); ++iter) {
|
||||
if (client_info_ == *iter) {
|
||||
client_is_still_alive = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (client_is_still_alive) {
|
||||
connect_callback_(connect_context_, client_info_);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// We should never get an I/O incomplete since we should not execute this
|
||||
// unless the Read has finished and the overlapped event is signaled. If
|
||||
// we do get INCOMPLETE, we have a bug in our code.
|
||||
assert(error_code != ERROR_IO_INCOMPLETE);
|
||||
}
|
||||
|
||||
EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
|
||||
}
|
||||
|
||||
// When the server thread serving the client is in the DISCONNECTING state,
|
||||
// disconnect from the pipe and reset the event. If anything fails, go into
|
||||
// the ERROR state. If it goes well, go into the INITIAL state and set the
|
||||
// event to start all over again.
|
||||
void CrashGenerationServer::HandleDisconnectingState() {
|
||||
assert(server_state_ == IPC_SERVER_STATE_DISCONNECTING);
|
||||
|
||||
// Done serving the client.
|
||||
client_info_ = NULL;
|
||||
|
||||
overlapped_.Internal = NULL;
|
||||
overlapped_.InternalHigh = NULL;
|
||||
overlapped_.Offset = 0;
|
||||
overlapped_.OffsetHigh = 0;
|
||||
overlapped_.Pointer = NULL;
|
||||
|
||||
if (!ResetEvent(overlapped_.hEvent)) {
|
||||
EnterErrorState();
|
||||
return;
|
||||
}
|
||||
|
||||
if (!DisconnectNamedPipe(pipe_)) {
|
||||
EnterErrorState();
|
||||
return;
|
||||
}
|
||||
|
||||
// If the server is shutting down do not connect to the
|
||||
// next client.
|
||||
if (shutting_down_) {
|
||||
return;
|
||||
}
|
||||
|
||||
EnterStateImmediately(IPC_SERVER_STATE_INITIAL);
|
||||
}
|
||||
|
||||
void CrashGenerationServer::EnterErrorState() {
|
||||
SetEvent(overlapped_.hEvent);
|
||||
server_state_ = IPC_SERVER_STATE_ERROR;
|
||||
}
|
||||
|
||||
void CrashGenerationServer::EnterStateWhenSignaled(IPCServerState state) {
|
||||
server_state_ = state;
|
||||
}
|
||||
|
||||
void CrashGenerationServer::EnterStateImmediately(IPCServerState state) {
|
||||
server_state_ = state;
|
||||
|
||||
if (!SetEvent(overlapped_.hEvent)) {
|
||||
server_state_ = IPC_SERVER_STATE_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
bool CrashGenerationServer::PrepareReply(const ClientInfo& client_info,
|
||||
ProtocolMessage* reply) const {
|
||||
reply->tag = MESSAGE_TAG_REGISTRATION_RESPONSE;
|
||||
reply->id = GetCurrentProcessId();
|
||||
|
||||
if (CreateClientHandles(client_info, reply)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Closing of remote handles (belonging to a different process) can
|
||||
// only be done through DuplicateHandle.
|
||||
if (reply->dump_request_handle) {
|
||||
DuplicateHandle(client_info.process_handle(), // hSourceProcessHandle
|
||||
reply->dump_request_handle, // hSourceHandle
|
||||
NULL, // hTargetProcessHandle
|
||||
0, // lpTargetHandle
|
||||
0, // dwDesiredAccess
|
||||
FALSE, // bInheritHandle
|
||||
DUPLICATE_CLOSE_SOURCE); // dwOptions
|
||||
reply->dump_request_handle = NULL;
|
||||
}
|
||||
|
||||
if (reply->dump_generated_handle) {
|
||||
DuplicateHandle(client_info.process_handle(), // hSourceProcessHandle
|
||||
reply->dump_generated_handle, // hSourceHandle
|
||||
NULL, // hTargetProcessHandle
|
||||
0, // lpTargetHandle
|
||||
0, // dwDesiredAccess
|
||||
FALSE, // bInheritHandle
|
||||
DUPLICATE_CLOSE_SOURCE); // dwOptions
|
||||
reply->dump_generated_handle = NULL;
|
||||
}
|
||||
|
||||
if (reply->server_alive_handle) {
|
||||
DuplicateHandle(client_info.process_handle(), // hSourceProcessHandle
|
||||
reply->server_alive_handle, // hSourceHandle
|
||||
NULL, // hTargetProcessHandle
|
||||
0, // lpTargetHandle
|
||||
0, // dwDesiredAccess
|
||||
FALSE, // bInheritHandle
|
||||
DUPLICATE_CLOSE_SOURCE); // dwOptions
|
||||
reply->server_alive_handle = NULL;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool CrashGenerationServer::CreateClientHandles(const ClientInfo& client_info,
|
||||
ProtocolMessage* reply) const {
|
||||
HANDLE current_process = GetCurrentProcess();
|
||||
if (!DuplicateHandle(current_process,
|
||||
client_info.dump_requested_handle(),
|
||||
client_info.process_handle(),
|
||||
&reply->dump_request_handle,
|
||||
kDumpRequestEventAccess,
|
||||
FALSE,
|
||||
0)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!DuplicateHandle(current_process,
|
||||
client_info.dump_generated_handle(),
|
||||
client_info.process_handle(),
|
||||
&reply->dump_generated_handle,
|
||||
kDumpGeneratedEventAccess,
|
||||
FALSE,
|
||||
0)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!DuplicateHandle(current_process,
|
||||
server_alive_handle_,
|
||||
client_info.process_handle(),
|
||||
&reply->server_alive_handle,
|
||||
kMutexAccess,
|
||||
FALSE,
|
||||
0)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CrashGenerationServer::RespondToClient(ClientInfo* client_info) {
|
||||
ProtocolMessage reply;
|
||||
if (!PrepareReply(*client_info, &reply)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
DWORD bytes_count = 0;
|
||||
bool success = WriteFile(pipe_,
|
||||
&reply,
|
||||
sizeof(reply),
|
||||
&bytes_count,
|
||||
&overlapped_) != FALSE;
|
||||
DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
|
||||
|
||||
if (!success && error_code != ERROR_IO_PENDING) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Takes over ownership of client_info. We MUST return true if AddClient
|
||||
// succeeds.
|
||||
return AddClient(client_info);
|
||||
}
|
||||
|
||||
// The server thread servicing the clients runs this method. The method
|
||||
// implements the state machine described in ReadMe.txt along with the
|
||||
// helper methods HandleXXXState.
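// As a quick reference (summarized from the handlers above), the normal
// connection cycle moves through:
//
//   INITIAL -> CONNECTING -> CONNECTED -> READING -> READ_DONE ->
//   WRITING -> WRITE_DONE -> READING_ACK -> DISCONNECTING -> INITIAL
//
// Failures in any handler fall through to DISCONNECTING, or to ERROR for
// unrecoverable problems.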
|
||||
void CrashGenerationServer::HandleConnectionRequest() {
|
||||
// If the server is shutting down, get into ERROR state, reset the event so
|
||||
// more workers don't run and return immediately.
|
||||
if (shutting_down_) {
|
||||
server_state_ = IPC_SERVER_STATE_ERROR;
|
||||
ResetEvent(overlapped_.hEvent);
|
||||
return;
|
||||
}
|
||||
|
||||
switch (server_state_) {
|
||||
case IPC_SERVER_STATE_ERROR:
|
||||
HandleErrorState();
|
||||
break;
|
||||
|
||||
case IPC_SERVER_STATE_INITIAL:
|
||||
HandleInitialState();
|
||||
break;
|
||||
|
||||
case IPC_SERVER_STATE_CONNECTING:
|
||||
HandleConnectingState();
|
||||
break;
|
||||
|
||||
case IPC_SERVER_STATE_CONNECTED:
|
||||
HandleConnectedState();
|
||||
break;
|
||||
|
||||
case IPC_SERVER_STATE_READING:
|
||||
HandleReadingState();
|
||||
break;
|
||||
|
||||
case IPC_SERVER_STATE_READ_DONE:
|
||||
HandleReadDoneState();
|
||||
break;
|
||||
|
||||
case IPC_SERVER_STATE_WRITING:
|
||||
HandleWritingState();
|
||||
break;
|
||||
|
||||
case IPC_SERVER_STATE_WRITE_DONE:
|
||||
HandleWriteDoneState();
|
||||
break;
|
||||
|
||||
case IPC_SERVER_STATE_READING_ACK:
|
||||
HandleReadingAckState();
|
||||
break;
|
||||
|
||||
case IPC_SERVER_STATE_DISCONNECTING:
|
||||
HandleDisconnectingState();
|
||||
break;
|
||||
|
||||
default:
|
||||
assert(false);
|
||||
// This indicates that we added one more state without
|
||||
// adding handling code.
|
||||
server_state_ = IPC_SERVER_STATE_ERROR;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
bool CrashGenerationServer::AddClient(ClientInfo* client_info) {
|
||||
HANDLE request_wait_handle = NULL;
|
||||
if (!RegisterWaitForSingleObject(&request_wait_handle,
|
||||
client_info->dump_requested_handle(),
|
||||
OnDumpRequest,
|
||||
client_info,
|
||||
INFINITE,
|
||||
kDumpRequestThreadFlags)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
client_info->set_dump_request_wait_handle(request_wait_handle);
|
||||
|
||||
// OnClientEnd will be called when the client process terminates.
|
||||
HANDLE process_wait_handle = NULL;
|
||||
if (!RegisterWaitForSingleObject(&process_wait_handle,
|
||||
client_info->process_handle(),
|
||||
OnClientEnd,
|
||||
client_info,
|
||||
INFINITE,
|
||||
WT_EXECUTEONLYONCE)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
client_info->set_process_exit_wait_handle(process_wait_handle);
|
||||
|
||||
// New scope to hold the lock for the shortest time.
|
||||
{
|
||||
AutoCriticalSection lock(&sync_);
|
||||
if (shutting_down_) {
|
||||
// If server is shutting down, don't add new clients
|
||||
return false;
|
||||
}
|
||||
clients_.push_back(client_info);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// static
|
||||
void CALLBACK CrashGenerationServer::OnPipeConnected(void* context, BOOLEAN) {
|
||||
assert(context);
|
||||
|
||||
CrashGenerationServer* obj =
|
||||
reinterpret_cast<CrashGenerationServer*>(context);
|
||||
obj->HandleConnectionRequest();
|
||||
}
|
||||
|
||||
// static
|
||||
void CALLBACK CrashGenerationServer::OnDumpRequest(void* context, BOOLEAN) {
|
||||
assert(context);
|
||||
ClientInfo* client_info = reinterpret_cast<ClientInfo*>(context);
|
||||
client_info->PopulateCustomInfo();
|
||||
|
||||
CrashGenerationServer* crash_server = client_info->crash_server();
|
||||
assert(crash_server);
|
||||
crash_server->HandleDumpRequest(*client_info);
|
||||
|
||||
ResetEvent(client_info->dump_requested_handle());
|
||||
}
|
||||
|
||||
// static
|
||||
void CALLBACK CrashGenerationServer::OnClientEnd(void* context, BOOLEAN) {
|
||||
assert(context);
|
||||
ClientInfo* client_info = reinterpret_cast<ClientInfo*>(context);
|
||||
|
||||
CrashGenerationServer* crash_server = client_info->crash_server();
|
||||
assert(crash_server);
|
||||
|
||||
crash_server->HandleClientProcessExit(client_info);
|
||||
}
|
||||
|
||||
void CrashGenerationServer::HandleClientProcessExit(ClientInfo* client_info) {
|
||||
assert(client_info);
|
||||
|
||||
// Must unregister the dump request wait operation and wait for any
|
||||
// dump requests that might be pending to finish before proceeding
|
||||
// with the client_info cleanup.
|
||||
client_info->UnregisterDumpRequestWaitAndBlockUntilNoPending();
|
||||
|
||||
if (exit_callback_) {
|
||||
exit_callback_(exit_context_, client_info);
|
||||
}
|
||||
|
||||
// Start a new scope to release lock automatically.
|
||||
{
|
||||
AutoCriticalSection lock(&sync_);
|
||||
if (shutting_down_) {
|
||||
// The crash generation server is shutting down and as part of the
|
||||
// shutdown process it will delete all clients from the clients_ list.
|
||||
return;
|
||||
}
|
||||
clients_.remove(client_info);
|
||||
}
|
||||
|
||||
// Explicitly unregister the process exit wait using the non-blocking method.
|
||||
// Otherwise, the destructor will attempt to unregister it using the blocking
|
||||
// method which will lead to a deadlock because it is being called from the
|
||||
// callback of the same wait operation
|
||||
client_info->UnregisterProcessExitWait(false);
|
||||
|
||||
delete client_info;
|
||||
}
|
||||
|
||||
void CrashGenerationServer::HandleDumpRequest(const ClientInfo& client_info) {
|
||||
bool execute_callback = true;
|
||||
// Generate the dump only if it's explicitly requested by the
|
||||
// server application; otherwise the server might want to generate
|
||||
// dump in the callback.
|
||||
std::wstring dump_path;
|
||||
if (generate_dumps_) {
|
||||
if (!GenerateDump(client_info, &dump_path)) {
|
||||
// client process terminated or some other error
|
||||
execute_callback = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (dump_callback_ && execute_callback) {
|
||||
std::wstring* ptr_dump_path = (dump_path == L"") ? NULL : &dump_path;
|
||||
dump_callback_(dump_context_, &client_info, ptr_dump_path);
|
||||
}
|
||||
|
||||
SetEvent(client_info.dump_generated_handle());
|
||||
}
|
||||
|
||||
bool CrashGenerationServer::GenerateDump(const ClientInfo& client,
|
||||
std::wstring* dump_path) {
|
||||
assert(client.pid() != 0);
|
||||
assert(client.process_handle());
|
||||
|
||||
// We have to get the address of EXCEPTION_INFORMATION from
|
||||
// the client process address space.
|
||||
EXCEPTION_POINTERS* client_ex_info = NULL;
|
||||
if (!client.GetClientExceptionInfo(&client_ex_info)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
DWORD client_thread_id = 0;
|
||||
if (!client.GetClientThreadId(&client_thread_id)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return dump_generator_->WriteMinidump(client.process_handle(),
|
||||
client.pid(),
|
||||
client_thread_id,
|
||||
GetCurrentThreadId(),
|
||||
client_ex_info,
|
||||
client.assert_info(),
|
||||
client.dump_type(),
|
||||
true,
|
||||
dump_path);
|
||||
}
|
||||
|
||||
} // namespace google_breakpad
|
@ -0,0 +1,292 @@
|
||||
// Copyright (c) 2008, Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef CLIENT_WINDOWS_CRASH_GENERATION_CRASH_GENERATION_SERVER_H__
|
||||
#define CLIENT_WINDOWS_CRASH_GENERATION_CRASH_GENERATION_SERVER_H__
|
||||
|
||||
#include <list>
|
||||
#include <string>
|
||||
#include "client/windows/common/ipc_protocol.h"
|
||||
#include "client/windows/crash_generation/minidump_generator.h"
|
||||
#include "common/scoped_ptr.h"
|
||||
|
||||
namespace google_breakpad {
|
||||
class ClientInfo;
|
||||
|
||||
// Abstraction for server side implementation of out-of-process crash
|
||||
// generation protocol for Windows platform only. It generates Windows
|
||||
// minidump files for client processes that request dump generation. When
|
||||
// the server is requested to start listening for clients (by calling the
|
||||
// Start method), it creates a named pipe and waits for the clients to
|
||||
// register. In response, it hands them event handles that the client can
|
||||
// signal to request dump generation. When the clients request dump
|
||||
// generation in this way, the server generates Windows minidump files.
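//
// A minimal usage sketch (hypothetical pipe name, dump directory and
// callback; the dump directory must already exist):
//
//   void OnDumpGenerated(void* context, const ClientInfo* client_info,
//                        const std::wstring* dump_path) {
//     // e.g. record *dump_path so it can be uploaded later
//   }
//
//   std::wstring dump_dir = L"C:\\dumps";
//   CrashGenerationServer server(L"\\\\.\\pipe\\MyCrashServerPipe",
//                                NULL,        // default pipe security
//                                NULL, NULL,  // connect callback/context
//                                OnDumpGenerated, NULL,
//                                NULL, NULL,  // exit callback/context
//                                NULL, NULL,  // upload callback/context
//                                true,        // generate dumps
//                                &dump_dir);
//   bool listening = server.Start();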
|
||||
class CrashGenerationServer {
|
||||
public:
|
||||
typedef void (*OnClientConnectedCallback)(void* context,
|
||||
const ClientInfo* client_info);
|
||||
|
||||
typedef void (*OnClientDumpRequestCallback)(void* context,
|
||||
const ClientInfo* client_info,
|
||||
const std::wstring* file_path);
|
||||
|
||||
typedef void (*OnClientExitedCallback)(void* context,
|
||||
const ClientInfo* client_info);
|
||||
|
||||
typedef void (*OnClientUploadRequestCallback)(void* context,
|
||||
const DWORD crash_id);
|
||||
|
||||
// Creates an instance with the given parameters.
|
||||
//
|
||||
// Parameter pipe_name: Name of the Windows named pipe
|
||||
// Parameter pipe_sec_attrs Security attributes to set on the pipe. Pass
|
||||
// NULL to use default security on the pipe. By default, the pipe created
|
||||
// allows Local System, Administrators and the Creator full control and
|
||||
// the Everyone group read access on the pipe.
|
||||
// Parameter connect_callback: Callback for a new client connection.
|
||||
// Parameter connect_context: Context for client connection callback.
|
||||
// Parameter crash_callback: Callback for a client crash dump request.
|
||||
// Parameter crash_context: Context for client crash dump request callback.
|
||||
// Parameter exit_callback: Callback for client process exit.
|
||||
// Parameter exit_context: Context for client exit callback.
|
||||
// Parameter generate_dumps: Whether to automatically generate dumps.
|
||||
// Client code of this class might want to generate dumps explicitly in the
|
||||
// crash dump request callback. In that case, false can be passed for this
|
||||
// parameter.
|
||||
// Parameter dump_path: Path for generating dumps; required only if true is
|
||||
// passed for generateDumps parameter; NULL can be passed otherwise.
|
||||
CrashGenerationServer(const std::wstring& pipe_name,
|
||||
SECURITY_ATTRIBUTES* pipe_sec_attrs,
|
||||
OnClientConnectedCallback connect_callback,
|
||||
void* connect_context,
|
||||
OnClientDumpRequestCallback dump_callback,
|
||||
void* dump_context,
|
||||
OnClientExitedCallback exit_callback,
|
||||
void* exit_context,
|
||||
OnClientUploadRequestCallback upload_request_callback,
|
||||
void* upload_context,
|
||||
bool generate_dumps,
|
||||
const std::wstring* dump_path);
|
||||
|
||||
~CrashGenerationServer();
|
||||
|
||||
// Performs initialization steps needed to start listening to clients. Upon
|
||||
// successful return clients may connect to this server's pipe.
|
||||
//
|
||||
// Returns true if initialization is successful; false otherwise.
|
||||
bool Start();
|
||||
|
||||
private:
|
||||
// Various states the client can be in during the handshake with
|
||||
// the server.
|
||||
enum IPCServerState {
|
||||
// Server starts in this state.
|
||||
IPC_SERVER_STATE_UNINITIALIZED,
|
||||
|
||||
// Server is in error state and it cannot serve any clients.
|
||||
IPC_SERVER_STATE_ERROR,
|
||||
|
||||
// Server has been started and is ready to connect to a client.
|
||||
IPC_SERVER_STATE_INITIAL,
|
||||
|
||||
// Server has issued an async connect to the pipe and it is waiting
|
||||
// for the connection to be established.
|
||||
IPC_SERVER_STATE_CONNECTING,
|
||||
|
||||
// Server is connected successfully.
|
||||
IPC_SERVER_STATE_CONNECTED,
|
||||
|
||||
// Server has issued an async read from the pipe and it is waiting for
|
||||
// the read to finish.
|
||||
IPC_SERVER_STATE_READING,
|
||||
|
||||
// Server is done reading from the pipe.
|
||||
IPC_SERVER_STATE_READ_DONE,
|
||||
|
||||
// Server has issued an async write to the pipe and it is waiting for
|
||||
// the write to finish.
|
||||
IPC_SERVER_STATE_WRITING,
|
||||
|
||||
// Server is done writing to the pipe.
|
||||
IPC_SERVER_STATE_WRITE_DONE,
|
||||
|
||||
// Server has issued an async read from the pipe for an ack and it
|
||||
// is waiting for the read to finish.
|
||||
IPC_SERVER_STATE_READING_ACK,
|
||||
|
||||
// Server is done writing to the pipe and it is now ready to disconnect
|
||||
// and reconnect.
|
||||
IPC_SERVER_STATE_DISCONNECTING
|
||||
};
|
||||
|
||||
//
|
||||
// Helper methods to handle various server IPC states.
|
||||
//
|
||||
void HandleErrorState();
|
||||
void HandleInitialState();
|
||||
void HandleConnectingState();
|
||||
void HandleConnectedState();
|
||||
void HandleReadingState();
|
||||
void HandleReadDoneState();
|
||||
void HandleWritingState();
|
||||
void HandleWriteDoneState();
|
||||
void HandleReadingAckState();
|
||||
void HandleDisconnectingState();
|
||||
|
||||
// Prepares reply for a client from the given parameters.
|
||||
bool PrepareReply(const ClientInfo& client_info,
|
||||
ProtocolMessage* reply) const;
|
||||
|
||||
// Duplicates various handles in the ClientInfo object for the client
|
||||
// process and stores them in the given ProtocolMessage instance. If
|
||||
// creating any handle fails, ProtocolMessage will contain the handles
|
||||
// already created successfully, which should be closed by the caller.
|
||||
bool CreateClientHandles(const ClientInfo& client_info,
|
||||
ProtocolMessage* reply) const;
|
||||
|
||||
// Responds to the given client. Returns true if all steps of
|
||||
// responding to the client succeed, false otherwise.
|
||||
bool RespondToClient(ClientInfo* client_info);
|
||||
|
||||
// Handles a connection request from the client.
|
||||
void HandleConnectionRequest();
|
||||
|
||||
// Handles a dump request from the client.
|
||||
void HandleDumpRequest(const ClientInfo& client_info);
|
||||
|
||||
// Callback for pipe connected event.
|
||||
static void CALLBACK OnPipeConnected(void* context, BOOLEAN timer_or_wait);
|
||||
|
||||
// Callback for a dump request.
|
||||
static void CALLBACK OnDumpRequest(void* context, BOOLEAN timer_or_wait);
|
||||
|
||||
// Callback for client process exit event.
|
||||
static void CALLBACK OnClientEnd(void* context, BOOLEAN timer_or_wait);
|
||||
|
||||
// Handles client process exit.
|
||||
void HandleClientProcessExit(ClientInfo* client_info);
|
||||
|
||||
// Adds the given client to the list of registered clients.
|
||||
bool AddClient(ClientInfo* client_info);
|
||||
|
||||
// Generates dump for the given client.
|
||||
bool GenerateDump(const ClientInfo& client, std::wstring* dump_path);
|
||||
|
||||
// Puts the server in a permanent error state and sets a signal such that
|
||||
// the state will be immediately entered after the current state transition
|
||||
// is complete.
|
||||
void EnterErrorState();
|
||||
|
||||
// Puts the server in the specified state and sets a signal such that the
|
||||
// state is immediately entered after the current state transition is
|
||||
// complete.
|
||||
void EnterStateImmediately(IPCServerState state);
|
||||
|
||||
// Puts the server in the specified state. No signal will be set, so the state
|
||||
// transition will only occur when signaled manually or by completion of an
|
||||
// asynchronous IO operation.
|
||||
void EnterStateWhenSignaled(IPCServerState state);
|
||||
|
||||
// Sync object for thread-safe access to the shared list of clients.
|
||||
CRITICAL_SECTION sync_;
|
||||
|
||||
// List of clients.
|
||||
std::list<ClientInfo*> clients_;
|
||||
|
||||
// Pipe name.
|
||||
std::wstring pipe_name_;
|
||||
|
||||
// Pipe security attributes
|
||||
SECURITY_ATTRIBUTES* pipe_sec_attrs_;
|
||||
|
||||
// Handle to the pipe used for handshake with clients.
|
||||
HANDLE pipe_;
|
||||
|
||||
// Pipe wait handle.
|
||||
HANDLE pipe_wait_handle_;
|
||||
|
||||
// Handle to server-alive mutex.
|
||||
HANDLE server_alive_handle_;
|
||||
|
||||
// Callback for a successful client connection.
|
||||
OnClientConnectedCallback connect_callback_;
|
||||
|
||||
// Context for client connected callback.
|
||||
void* connect_context_;
|
||||
|
||||
// Callback for a client dump request.
|
||||
OnClientDumpRequestCallback dump_callback_;
|
||||
|
||||
// Context for client dump request callback.
|
||||
void* dump_context_;
|
||||
|
||||
// Callback for client process exit.
|
||||
OnClientExitedCallback exit_callback_;
|
||||
|
||||
// Context for client process exit callback.
|
||||
void* exit_context_;
|
||||
|
||||
// Callback for upload request.
|
||||
OnClientUploadRequestCallback upload_request_callback_;
|
||||
|
||||
// Context for upload request callback.
|
||||
void* upload_context_;
|
||||
|
||||
// Whether to generate dumps.
|
||||
bool generate_dumps_;
|
||||
|
||||
// Instance of a mini dump generator.
|
||||
scoped_ptr<MinidumpGenerator> dump_generator_;
|
||||
|
||||
// State of the server in performing the IPC with the client.
|
||||
// Note that since we restrict the pipe to one instance, we
|
||||
// only need to keep one state of the server. Otherwise, server
|
||||
// would have one state per client it is talking to.
|
||||
IPCServerState server_state_;
|
||||
|
||||
// Whether the server is shutting down.
|
||||
bool shutting_down_;
|
||||
|
||||
// Overlapped instance for async I/O on the pipe.
|
||||
OVERLAPPED overlapped_;
|
||||
|
||||
// Message object used in IPC with the client.
|
||||
ProtocolMessage msg_;
|
||||
|
||||
// Client Info for the client that's connecting to the server.
|
||||
ClientInfo* client_info_;
|
||||
|
||||
// Disable copy ctor and operator=.
|
||||
CrashGenerationServer(const CrashGenerationServer& crash_server);
|
||||
CrashGenerationServer& operator=(const CrashGenerationServer& crash_server);
|
||||
};
|
||||
|
||||
} // namespace google_breakpad
|
||||
|
||||
#endif // CLIENT_WINDOWS_CRASH_GENERATION_CRASH_GENERATION_SERVER_H__
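For orientation, here is a minimal sketch of wiring up the server declared above. The pipe name and dump folder are hypothetical, and the dump-request callback shape (context, client_info, file_path) is an assumption not shown in this hunk; this is illustrative only, not part of the commit.

// Sketch only: hypothetical pipe name and dump folder, NULL for unused callbacks.
#include <string>
#include "client/windows/crash_generation/crash_generation_server.h"

using google_breakpad::ClientInfo;
using google_breakpad::CrashGenerationServer;

// Assumed OnClientDumpRequestCallback shape: (context, client_info, file_path).
static void OnClientDumpRequest(void* context,
                                const ClientInfo* client_info,
                                const std::wstring* file_path) {
  // A real server would hand file_path to its reporting pipeline here.
}

bool StartCrashServer(CrashGenerationServer** out_server) {
  const std::wstring pipe_name = L"\\\\.\\pipe\\MyAppCrashService";  // hypothetical
  const std::wstring dump_path = L"C:\\dumps";                       // hypothetical
  *out_server = new CrashGenerationServer(pipe_name,
                                          NULL,                // default pipe security
                                          NULL, NULL,          // no connect callback
                                          OnClientDumpRequest, NULL,
                                          NULL, NULL,          // no exit callback
                                          NULL, NULL,          // no upload callback
                                          true,                // server generates dumps
                                          &dump_path);
  // Per Start()'s contract above, clients may connect only after it returns true.
  return (*out_server)->Start();
}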
|
@ -0,0 +1,537 @@
|
||||
// Copyright (c) 2008, Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#include "client/windows/crash_generation/minidump_generator.h"
|
||||
|
||||
#include <assert.h>
|
||||
#include <avrfsdk.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <iterator>
|
||||
#include <list>
|
||||
#include <vector>
|
||||
|
||||
#include "client/windows/common/auto_critical_section.h"
|
||||
#include "common/windows/guid_string.h"
|
||||
|
||||
using std::wstring;
|
||||
|
||||
namespace {
|
||||
|
||||
// A helper class used to collect handle operations data. Unlike
|
||||
// |MiniDumpWithHandleData|, it records the operations for a single handle value
|
||||
// only, making it possible to include this information in a minidump.
|
||||
class HandleTraceData {
|
||||
public:
|
||||
HandleTraceData();
|
||||
~HandleTraceData();
|
||||
|
||||
// Collects the handle operations data and formats a user stream to be added
|
||||
// to the minidump.
|
||||
bool CollectHandleData(HANDLE process_handle,
|
||||
EXCEPTION_POINTERS* exception_pointers);
|
||||
|
||||
// Fills the user dump entry with a pointer to the collected handle operations
|
||||
// data. Returns |true| if the entry was initialized successfully, or |false|
|
||||
// if no trace data is available.
|
||||
bool GetUserStream(MINIDUMP_USER_STREAM* user_stream);
|
||||
|
||||
private:
|
||||
// Reads the exception code from the client process's address space.
|
||||
// This routine assumes that the client process's pointer width matches ours.
|
||||
static bool ReadExceptionCode(HANDLE process_handle,
|
||||
EXCEPTION_POINTERS* exception_pointers,
|
||||
DWORD* exception_code);
|
||||
|
||||
// Stores handle operations retrieved by VerifierEnumerateResource().
|
||||
static ULONG CALLBACK RecordHandleOperations(void* resource_description,
|
||||
void* enumeration_context,
|
||||
ULONG* enumeration_level);
|
||||
|
||||
// Function pointer type for VerifierEnumerateResource, which is looked up
|
||||
// dynamically.
|
||||
typedef BOOL (WINAPI* VerifierEnumerateResourceType)(
|
||||
HANDLE Process,
|
||||
ULONG Flags,
|
||||
ULONG ResourceType,
|
||||
AVRF_RESOURCE_ENUMERATE_CALLBACK ResourceCallback,
|
||||
PVOID EnumerationContext);
|
||||
|
||||
// Handle to dynamically loaded verifier.dll.
|
||||
HMODULE verifier_module_;
|
||||
|
||||
// Pointer to the VerifierEnumerateResource function.
|
||||
VerifierEnumerateResourceType enumerate_resource_;
|
||||
|
||||
// Handle value to look for.
|
||||
ULONG64 handle_;
|
||||
|
||||
// List of handle operations for |handle_|.
|
||||
std::list<AVRF_HANDLE_OPERATION> operations_;
|
||||
|
||||
// Minidump stream data.
|
||||
std::vector<char> stream_;
|
||||
};
|
||||
|
||||
HandleTraceData::HandleTraceData()
|
||||
: verifier_module_(NULL),
|
||||
enumerate_resource_(NULL),
|
||||
handle_(NULL) {
|
||||
}
|
||||
|
||||
HandleTraceData::~HandleTraceData() {
|
||||
if (verifier_module_) {
|
||||
FreeLibrary(verifier_module_);
|
||||
}
|
||||
}
|
||||
|
||||
bool HandleTraceData::CollectHandleData(
|
||||
HANDLE process_handle,
|
||||
EXCEPTION_POINTERS* exception_pointers) {
|
||||
DWORD exception_code;
|
||||
if (!ReadExceptionCode(process_handle, exception_pointers, &exception_code)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Verify whether the exception is STATUS_INVALID_HANDLE. Do not record any
|
||||
// handle information if it is a different exception to keep the minidump
|
||||
// small.
|
||||
if (exception_code != STATUS_INVALID_HANDLE) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Load verifier!VerifierEnumerateResource() dynamically.
|
||||
verifier_module_ = LoadLibrary(TEXT("verifier.dll"));
|
||||
if (!verifier_module_) {
|
||||
return false;
|
||||
}
|
||||
|
||||
enumerate_resource_ = reinterpret_cast<VerifierEnumerateResourceType>(
|
||||
GetProcAddress(verifier_module_, "VerifierEnumerateResource"));
|
||||
if (!enumerate_resource_) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// STATUS_INVALID_HANDLE does not provide the offending handle value in
|
||||
// the exception parameters so we have to guess. At the moment we scan
|
||||
// the handle operations trace looking for the last invalid handle operation
|
||||
// and record only the operations for that handle value.
|
||||
if (enumerate_resource_(process_handle,
|
||||
0,
|
||||
AvrfResourceHandleTrace,
|
||||
&RecordHandleOperations,
|
||||
this) != ERROR_SUCCESS) {
|
||||
// Handle tracing must not have been enabled.
|
||||
return true;
|
||||
}
|
||||
|
||||
// Now that |handle_| is initialized, purge all irrelevant operations.
|
||||
std::list<AVRF_HANDLE_OPERATION>::iterator i = operations_.begin();
|
||||
std::list<AVRF_HANDLE_OPERATION>::iterator i_end = operations_.end();
|
||||
while (i != i_end) {
|
||||
if (i->Handle == handle_) {
|
||||
++i;
|
||||
} else {
|
||||
i = operations_.erase(i);
|
||||
}
|
||||
}
|
||||
|
||||
// Convert the list of recorded operations to a minidump stream.
|
||||
stream_.resize(sizeof(MINIDUMP_HANDLE_OPERATION_LIST) +
|
||||
sizeof(AVRF_HANDLE_OPERATION) * operations_.size());
|
||||
|
||||
MINIDUMP_HANDLE_OPERATION_LIST* stream_data =
|
||||
reinterpret_cast<MINIDUMP_HANDLE_OPERATION_LIST*>(
|
||||
&stream_.front());
|
||||
stream_data->SizeOfHeader = sizeof(MINIDUMP_HANDLE_OPERATION_LIST);
|
||||
stream_data->SizeOfEntry = sizeof(AVRF_HANDLE_OPERATION);
|
||||
stream_data->NumberOfEntries = static_cast<ULONG32>(operations_.size());
|
||||
stream_data->Reserved = 0;
|
||||
std::copy(operations_.begin(),
|
||||
operations_.end(),
|
||||
stdext::checked_array_iterator<AVRF_HANDLE_OPERATION*>(
|
||||
reinterpret_cast<AVRF_HANDLE_OPERATION*>(stream_data + 1),
|
||||
operations_.size()));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool HandleTraceData::GetUserStream(MINIDUMP_USER_STREAM* user_stream) {
|
||||
if (stream_.empty()) {
|
||||
return false;
|
||||
} else {
|
||||
user_stream->Type = HandleOperationListStream;
|
||||
user_stream->BufferSize = static_cast<ULONG>(stream_.size());
|
||||
user_stream->Buffer = &stream_.front();
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
bool HandleTraceData::ReadExceptionCode(
|
||||
HANDLE process_handle,
|
||||
EXCEPTION_POINTERS* exception_pointers,
|
||||
DWORD* exception_code) {
|
||||
EXCEPTION_POINTERS pointers;
|
||||
if (!ReadProcessMemory(process_handle,
|
||||
exception_pointers,
|
||||
&pointers,
|
||||
sizeof(pointers),
|
||||
NULL)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!ReadProcessMemory(process_handle,
|
||||
pointers.ExceptionRecord,
|
||||
exception_code,
|
||||
sizeof(*exception_code),
|
||||
NULL)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
ULONG CALLBACK HandleTraceData::RecordHandleOperations(
|
||||
void* resource_description,
|
||||
void* enumeration_context,
|
||||
ULONG* enumeration_level) {
|
||||
AVRF_HANDLE_OPERATION* description =
|
||||
reinterpret_cast<AVRF_HANDLE_OPERATION*>(resource_description);
|
||||
HandleTraceData* self =
|
||||
reinterpret_cast<HandleTraceData*>(enumeration_context);
|
||||
|
||||
// Remember the last invalid handle operation.
|
||||
if (description->OperationType == OperationDbBADREF) {
|
||||
self->handle_ = description->Handle;
|
||||
}
|
||||
|
||||
// Record all handle operations.
|
||||
self->operations_.push_back(*description);
|
||||
|
||||
*enumeration_level = HeapEnumerationEverything;
|
||||
return ERROR_SUCCESS;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
namespace google_breakpad {
|
||||
|
||||
MinidumpGenerator::MinidumpGenerator(const wstring& dump_path)
|
||||
: dbghelp_module_(NULL),
|
||||
rpcrt4_module_(NULL),
|
||||
dump_path_(dump_path),
|
||||
write_dump_(NULL),
|
||||
create_uuid_(NULL) {
|
||||
InitializeCriticalSection(&module_load_sync_);
|
||||
InitializeCriticalSection(&get_proc_address_sync_);
|
||||
}
|
||||
|
||||
MinidumpGenerator::~MinidumpGenerator() {
|
||||
if (dbghelp_module_) {
|
||||
FreeLibrary(dbghelp_module_);
|
||||
}
|
||||
|
||||
if (rpcrt4_module_) {
|
||||
FreeLibrary(rpcrt4_module_);
|
||||
}
|
||||
|
||||
DeleteCriticalSection(&get_proc_address_sync_);
|
||||
DeleteCriticalSection(&module_load_sync_);
|
||||
}
|
||||
|
||||
bool MinidumpGenerator::WriteMinidump(HANDLE process_handle,
|
||||
DWORD process_id,
|
||||
DWORD thread_id,
|
||||
DWORD requesting_thread_id,
|
||||
EXCEPTION_POINTERS* exception_pointers,
|
||||
MDRawAssertionInfo* assert_info,
|
||||
MINIDUMP_TYPE dump_type,
|
||||
bool is_client_pointers,
|
||||
wstring* dump_path) {
|
||||
// Just call the full WriteMinidump with NULL as the full_dump_path.
|
||||
return this->WriteMinidump(process_handle, process_id, thread_id,
|
||||
requesting_thread_id, exception_pointers,
|
||||
assert_info, dump_type, is_client_pointers,
|
||||
dump_path, NULL);
|
||||
}
|
||||
|
||||
bool MinidumpGenerator::WriteMinidump(HANDLE process_handle,
|
||||
DWORD process_id,
|
||||
DWORD thread_id,
|
||||
DWORD requesting_thread_id,
|
||||
EXCEPTION_POINTERS* exception_pointers,
|
||||
MDRawAssertionInfo* assert_info,
|
||||
MINIDUMP_TYPE dump_type,
|
||||
bool is_client_pointers,
|
||||
wstring* dump_path,
|
||||
wstring* full_dump_path) {
|
||||
MiniDumpWriteDumpType write_dump = GetWriteDump();
|
||||
if (!write_dump) {
|
||||
return false;
|
||||
}
|
||||
|
||||
wstring dump_file_path;
|
||||
if (!GenerateDumpFilePath(&dump_file_path)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// If the client requests a full memory dump, we will write a normal mini
|
||||
// dump and a full memory dump. Both dump files use the same uuid as file
|
||||
// name prefix.
|
||||
bool full_memory_dump = (dump_type & MiniDumpWithFullMemory) != 0;
|
||||
wstring full_dump_file_path;
|
||||
if (full_memory_dump) {
|
||||
full_dump_file_path.assign(dump_file_path);
|
||||
full_dump_file_path.resize(full_dump_file_path.size() - 4); // strip .dmp
|
||||
full_dump_file_path.append(TEXT("-full.dmp"));
|
||||
}
|
||||
|
||||
HANDLE dump_file = CreateFile(dump_file_path.c_str(),
|
||||
GENERIC_WRITE,
|
||||
0,
|
||||
NULL,
|
||||
CREATE_NEW,
|
||||
FILE_ATTRIBUTE_NORMAL,
|
||||
NULL);
|
||||
|
||||
if (dump_file == INVALID_HANDLE_VALUE) {
|
||||
return false;
|
||||
}
|
||||
|
||||
HANDLE full_dump_file = INVALID_HANDLE_VALUE;
|
||||
if (full_memory_dump) {
|
||||
full_dump_file = CreateFile(full_dump_file_path.c_str(),
|
||||
GENERIC_WRITE,
|
||||
0,
|
||||
NULL,
|
||||
CREATE_NEW,
|
||||
FILE_ATTRIBUTE_NORMAL,
|
||||
NULL);
|
||||
|
||||
if (full_dump_file == INVALID_HANDLE_VALUE) {
|
||||
CloseHandle(dump_file);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
MINIDUMP_EXCEPTION_INFORMATION* dump_exception_pointers = NULL;
|
||||
MINIDUMP_EXCEPTION_INFORMATION dump_exception_info;
|
||||
|
||||
// Setup the exception information object only if it's a dump
|
||||
// due to an exception.
|
||||
if (exception_pointers) {
|
||||
dump_exception_pointers = &dump_exception_info;
|
||||
dump_exception_info.ThreadId = thread_id;
|
||||
dump_exception_info.ExceptionPointers = exception_pointers;
|
||||
dump_exception_info.ClientPointers = is_client_pointers;
|
||||
}
|
||||
|
||||
// Add an MDRawBreakpadInfo stream to the minidump, to provide additional
|
||||
// information about the exception handler to the Breakpad processor.
|
||||
// The information will help the processor determine which threads are
|
||||
// relevant. The Breakpad processor does not require this information but
|
||||
// can function better with Breakpad-generated dumps when it is present.
|
||||
// The native debugger is not harmed by the presence of this information.
|
||||
MDRawBreakpadInfo breakpad_info = {0};
|
||||
if (!is_client_pointers) {
|
||||
// Set the dump thread id and requesting thread id only in case of
|
||||
// in-process dump generation.
|
||||
breakpad_info.validity = MD_BREAKPAD_INFO_VALID_DUMP_THREAD_ID |
|
||||
MD_BREAKPAD_INFO_VALID_REQUESTING_THREAD_ID;
|
||||
breakpad_info.dump_thread_id = thread_id;
|
||||
breakpad_info.requesting_thread_id = requesting_thread_id;
|
||||
}
|
||||
|
||||
// Leave room in user_stream_array for possible assertion info and handle
|
||||
// operations streams.
|
||||
MINIDUMP_USER_STREAM user_stream_array[3];
|
||||
user_stream_array[0].Type = MD_BREAKPAD_INFO_STREAM;
|
||||
user_stream_array[0].BufferSize = sizeof(breakpad_info);
|
||||
user_stream_array[0].Buffer = &breakpad_info;
|
||||
|
||||
MINIDUMP_USER_STREAM_INFORMATION user_streams;
|
||||
user_streams.UserStreamCount = 1;
|
||||
user_streams.UserStreamArray = user_stream_array;
|
||||
|
||||
MDRawAssertionInfo* actual_assert_info = assert_info;
|
||||
MDRawAssertionInfo client_assert_info = {0};
|
||||
|
||||
if (assert_info) {
|
||||
// If the assertion info object lives in the client process,
|
||||
// read the memory of the client process.
|
||||
if (is_client_pointers) {
|
||||
SIZE_T bytes_read = 0;
|
||||
if (!ReadProcessMemory(process_handle,
|
||||
assert_info,
|
||||
&client_assert_info,
|
||||
sizeof(client_assert_info),
|
||||
&bytes_read)) {
|
||||
CloseHandle(dump_file);
|
||||
if (full_dump_file != INVALID_HANDLE_VALUE)
|
||||
CloseHandle(full_dump_file);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (bytes_read != sizeof(client_assert_info)) {
|
||||
CloseHandle(dump_file);
|
||||
if (full_dump_file != INVALID_HANDLE_VALUE)
|
||||
CloseHandle(full_dump_file);
|
||||
return false;
|
||||
}
|
||||
|
||||
actual_assert_info = &client_assert_info;
|
||||
}
|
||||
|
||||
user_stream_array[1].Type = MD_ASSERTION_INFO_STREAM;
|
||||
user_stream_array[1].BufferSize = sizeof(MDRawAssertionInfo);
|
||||
user_stream_array[1].Buffer = actual_assert_info;
|
||||
++user_streams.UserStreamCount;
|
||||
}
|
||||
|
||||
// If the process is terminated by a STATUS_INVALID_HANDLE exception, store
|
||||
// the trace of operations for the offending handle value. Do nothing special
|
||||
// if the client already requested the handle trace to be stored in the dump.
|
||||
HandleTraceData handle_trace_data;
|
||||
if (exception_pointers && (dump_type & MiniDumpWithHandleData) == 0) {
|
||||
if (!handle_trace_data.CollectHandleData(process_handle,
|
||||
exception_pointers)) {
|
||||
CloseHandle(dump_file);
|
||||
if (full_dump_file != INVALID_HANDLE_VALUE)
|
||||
CloseHandle(full_dump_file);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool result_full_memory = true;
|
||||
if (full_memory_dump) {
|
||||
result_full_memory = write_dump(
|
||||
process_handle,
|
||||
process_id,
|
||||
full_dump_file,
|
||||
static_cast<MINIDUMP_TYPE>((dump_type & (~MiniDumpNormal))
|
||||
| MiniDumpWithHandleData),
|
||||
exception_pointers ? &dump_exception_info : NULL,
|
||||
&user_streams,
|
||||
NULL) != FALSE;
|
||||
}
|
||||
|
||||
// Add handle operations trace stream to the minidump if it was collected.
|
||||
if (handle_trace_data.GetUserStream(
|
||||
&user_stream_array[user_streams.UserStreamCount])) {
|
||||
++user_streams.UserStreamCount;
|
||||
}
|
||||
|
||||
bool result_minidump = write_dump(
|
||||
process_handle,
|
||||
process_id,
|
||||
dump_file,
|
||||
static_cast<MINIDUMP_TYPE>((dump_type & (~MiniDumpWithFullMemory))
|
||||
| MiniDumpNormal),
|
||||
exception_pointers ? &dump_exception_info : NULL,
|
||||
&user_streams,
|
||||
NULL) != FALSE;
|
||||
|
||||
bool result = result_minidump && result_full_memory;
|
||||
|
||||
CloseHandle(dump_file);
|
||||
if (full_dump_file != INVALID_HANDLE_VALUE)
|
||||
CloseHandle(full_dump_file);
|
||||
|
||||
// Store the path of the dump file in the out parameter if dump generation
|
||||
// succeeded.
|
||||
if (result && dump_path) {
|
||||
*dump_path = dump_file_path;
|
||||
}
|
||||
if (result && full_memory_dump && full_dump_path) {
|
||||
*full_dump_path = full_dump_file_path;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
HMODULE MinidumpGenerator::GetDbghelpModule() {
|
||||
AutoCriticalSection lock(&module_load_sync_);
|
||||
if (!dbghelp_module_) {
|
||||
dbghelp_module_ = LoadLibrary(TEXT("dbghelp.dll"));
|
||||
}
|
||||
|
||||
return dbghelp_module_;
|
||||
}
|
||||
|
||||
MinidumpGenerator::MiniDumpWriteDumpType MinidumpGenerator::GetWriteDump() {
|
||||
AutoCriticalSection lock(&get_proc_address_sync_);
|
||||
if (!write_dump_) {
|
||||
HMODULE module = GetDbghelpModule();
|
||||
if (module) {
|
||||
FARPROC proc = GetProcAddress(module, "MiniDumpWriteDump");
|
||||
write_dump_ = reinterpret_cast<MiniDumpWriteDumpType>(proc);
|
||||
}
|
||||
}
|
||||
|
||||
return write_dump_;
|
||||
}
|
||||
|
||||
HMODULE MinidumpGenerator::GetRpcrt4Module() {
|
||||
AutoCriticalSection lock(&module_load_sync_);
|
||||
if (!rpcrt4_module_) {
|
||||
rpcrt4_module_ = LoadLibrary(TEXT("rpcrt4.dll"));
|
||||
}
|
||||
|
||||
return rpcrt4_module_;
|
||||
}
|
||||
|
||||
MinidumpGenerator::UuidCreateType MinidumpGenerator::GetCreateUuid() {
|
||||
AutoCriticalSection lock(&module_load_sync_);
|
||||
if (!create_uuid_) {
|
||||
HMODULE module = GetRpcrt4Module();
|
||||
if (module) {
|
||||
FARPROC proc = GetProcAddress(module, "UuidCreate");
|
||||
create_uuid_ = reinterpret_cast<UuidCreateType>(proc);
|
||||
}
|
||||
}
|
||||
|
||||
return create_uuid_;
|
||||
}
|
||||
|
||||
bool MinidumpGenerator::GenerateDumpFilePath(wstring* file_path) {
|
||||
UUID id = {0};
|
||||
|
||||
UuidCreateType create_uuid = GetCreateUuid();
|
||||
if (!create_uuid) {
|
||||
return false;
|
||||
}
|
||||
|
||||
create_uuid(&id);
|
||||
wstring id_str = GUIDString::GUIDToWString(&id);
|
||||
|
||||
*file_path = dump_path_ + TEXT("\\") + id_str + TEXT(".dmp");
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace google_breakpad
|
@ -0,0 +1,136 @@
|
||||
// Copyright (c) 2008, Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef CLIENT_WINDOWS_CRASH_GENERATION_MINIDUMP_GENERATOR_H_
|
||||
#define CLIENT_WINDOWS_CRASH_GENERATION_MINIDUMP_GENERATOR_H_
|
||||
|
||||
#include <windows.h>
|
||||
#include <dbghelp.h>
|
||||
#include <rpc.h>
|
||||
#include <list>
|
||||
#include "google_breakpad/common/minidump_format.h"
|
||||
|
||||
namespace google_breakpad {
|
||||
|
||||
// Abstraction for various objects and operations needed to generate
|
||||
// a minidump on Windows. This abstraction is useful to hide all the gory
|
||||
// details for minidump generation and provide a clean interface to
|
||||
// the clients to generate minidumps.
|
||||
class MinidumpGenerator {
|
||||
public:
|
||||
// Creates an instance with the given dump path.
|
||||
explicit MinidumpGenerator(const std::wstring& dump_path);
|
||||
|
||||
~MinidumpGenerator();
|
||||
|
||||
// Writes the minidump with the given parameters. Stores the
|
||||
// dump file path in the dump_path parameter if dump generation
|
||||
// succeeds.
|
||||
bool WriteMinidump(HANDLE process_handle,
|
||||
DWORD process_id,
|
||||
DWORD thread_id,
|
||||
DWORD requesting_thread_id,
|
||||
EXCEPTION_POINTERS* exception_pointers,
|
||||
MDRawAssertionInfo* assert_info,
|
||||
MINIDUMP_TYPE dump_type,
|
||||
bool is_client_pointers,
|
||||
std::wstring* dump_path);
|
||||
|
||||
// Writes the minidump with the given parameters. Stores the dump file
|
||||
// path in the dump_path (and full_dump_path) parameter if dump
|
||||
// generation succeeds. full_dump_path and dump_path can be NULL.
|
||||
bool WriteMinidump(HANDLE process_handle,
|
||||
DWORD process_id,
|
||||
DWORD thread_id,
|
||||
DWORD requesting_thread_id,
|
||||
EXCEPTION_POINTERS* exception_pointers,
|
||||
MDRawAssertionInfo* assert_info,
|
||||
MINIDUMP_TYPE dump_type,
|
||||
bool is_client_pointers,
|
||||
std::wstring* dump_path,
|
||||
std::wstring* full_dump_path);
|
||||
|
||||
private:
|
||||
// Function pointer type for MiniDumpWriteDump, which is looked up
|
||||
// dynamically.
|
||||
typedef BOOL (WINAPI* MiniDumpWriteDumpType)(
|
||||
HANDLE hProcess,
|
||||
DWORD ProcessId,
|
||||
HANDLE hFile,
|
||||
MINIDUMP_TYPE DumpType,
|
||||
CONST PMINIDUMP_EXCEPTION_INFORMATION ExceptionParam,
|
||||
CONST PMINIDUMP_USER_STREAM_INFORMATION UserStreamParam,
|
||||
CONST PMINIDUMP_CALLBACK_INFORMATION CallbackParam);
|
||||
|
||||
// Function pointer type for UuidCreate, which is looked up dynamically.
|
||||
typedef RPC_STATUS (RPC_ENTRY* UuidCreateType)(UUID* Uuid);
|
||||
|
||||
// Loads the appropriate DLL lazily in a thread-safe way.
|
||||
HMODULE GetDbghelpModule();
|
||||
|
||||
// Loads the appropriate DLL and gets a pointer to the MiniDumpWriteDump
|
||||
// function lazily and in a thread-safe manner.
|
||||
MiniDumpWriteDumpType GetWriteDump();
|
||||
|
||||
// Loads the appropriate DLL lazily in a thread-safe way.
|
||||
HMODULE GetRpcrt4Module();
|
||||
|
||||
// Loads the appropriate DLL and gets a pointer to the UuidCreate
|
||||
// function lazily and in a thread-safe manner.
|
||||
UuidCreateType GetCreateUuid();
|
||||
|
||||
// Returns the path for the file to write dump to.
|
||||
bool GenerateDumpFilePath(std::wstring* file_path);
|
||||
|
||||
// Handle to dynamically loaded DbgHelp.dll.
|
||||
HMODULE dbghelp_module_;
|
||||
|
||||
// Pointer to the MiniDumpWriteDump function.
|
||||
MiniDumpWriteDumpType write_dump_;
|
||||
|
||||
// Handle to dynamically loaded rpcrt4.dll.
|
||||
HMODULE rpcrt4_module_;
|
||||
|
||||
// Pointer to the UuidCreate function.
|
||||
UuidCreateType create_uuid_;
|
||||
|
||||
// Folder path to store dump files.
|
||||
std::wstring dump_path_;
|
||||
|
||||
// Critical section to synchronize the action of loading modules dynamically.
|
||||
CRITICAL_SECTION module_load_sync_;
|
||||
|
||||
// Critical section to synchronize action of dynamically getting function
|
||||
// addresses from modules.
|
||||
CRITICAL_SECTION get_proc_address_sync_;
|
||||
};
|
||||
|
||||
} // namespace google_breakpad
|
||||
|
||||
#endif // CLIENT_WINDOWS_CRASH_GENERATION_MINIDUMP_GENERATOR_H_
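As a rough usage sketch of the first WriteMinidump overload declared above: writing a MiniDumpNormal of the calling process, with no exception or assertion information. The dump folder is a hypothetical value; this is illustrative only.

// Sketch only: writes a MiniDumpNormal of the calling process.
#include <windows.h>
#include <dbghelp.h>
#include <string>
#include "client/windows/crash_generation/minidump_generator.h"

bool WriteSelfDump(std::wstring* dump_file_path) {
  google_breakpad::MinidumpGenerator generator(L"C:\\dumps");  // hypothetical folder
  return generator.WriteMinidump(GetCurrentProcess(),
                                 GetCurrentProcessId(),
                                 GetCurrentThreadId(),
                                 GetCurrentThreadId(),   // requesting thread
                                 NULL,                    // no exception pointers
                                 NULL,                    // no assertion info
                                 MiniDumpNormal,
                                 false,                   // pointers are local, not client-side
                                 dump_file_path);
}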
|
google-breakpad/src/client/windows/handler/exception_handler.cc (new file, 1067 lines; diff suppressed because it is too large)
@ -0,0 +1,48 @@
|
||||
# Copyright (c) 2010, Google Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
{
|
||||
'includes': [
|
||||
'../build/common.gypi',
|
||||
],
|
||||
'targets': [
|
||||
{
|
||||
'target_name': 'exception_handler',
|
||||
'type': 'static_library',
|
||||
'sources': [
|
||||
"exception_handler.cc",
|
||||
"exception_handler.h",
|
||||
],
|
||||
'dependencies': [
|
||||
'../breakpad_client.gyp:common',
|
||||
'../crash_generation/crash_generation.gyp:crash_generation_client',
|
||||
]
|
||||
},
|
||||
],
|
||||
}
|
google-breakpad/src/client/windows/handler/exception_handler.h (new file, 511 lines)
@ -0,0 +1,511 @@
|
||||
// Copyright (c) 2006, Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// ExceptionHandler can write a minidump file when an exception occurs,
|
||||
// or when WriteMinidump() is called explicitly by your program.
|
||||
//
|
||||
// To have the exception handler write minidumps when an uncaught exception
|
||||
// (crash) occurs, you should create an instance early in the execution
|
||||
// of your program, and keep it around for the entire time you want to
|
||||
// have crash handling active (typically, until shutdown).
|
||||
//
|
||||
// If you want to write minidumps without installing the exception handler,
|
||||
// you can create an ExceptionHandler with install_handler set to false,
|
||||
// then call WriteMinidump. You can also use this technique if you want to
|
||||
// use different minidump callbacks for different call sites.
|
||||
//
|
||||
// In either case, a callback function is called when a minidump is written,
|
||||
// which receives the unique id of the minidump. The caller can use this
|
||||
// id to collect and write additional application state, and to launch an
|
||||
// external crash-reporting application.
|
||||
//
|
||||
// It is important that creation and destruction of ExceptionHandler objects
|
||||
// be nested cleanly, when using install_handler = true.
|
||||
// Avoid the following pattern:
|
||||
// ExceptionHandler *e = new ExceptionHandler(...);
|
||||
// ExceptionHandler *f = new ExceptionHandler(...);
|
||||
// delete e;
|
||||
// This will put the exception filter stack into an inconsistent state.
|
||||
|
||||
#ifndef CLIENT_WINDOWS_HANDLER_EXCEPTION_HANDLER_H__
|
||||
#define CLIENT_WINDOWS_HANDLER_EXCEPTION_HANDLER_H__
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <Windows.h>
|
||||
#include <DbgHelp.h>
|
||||
#include <rpc.h>
|
||||
|
||||
#pragma warning( push )
|
||||
// Disable exception handler warnings.
|
||||
#pragma warning( disable : 4530 )
|
||||
|
||||
#include <list>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "client/windows/common/ipc_protocol.h"
|
||||
#include "client/windows/crash_generation/crash_generation_client.h"
|
||||
#include "common/scoped_ptr.h"
|
||||
#include "google_breakpad/common/minidump_format.h"
|
||||
|
||||
namespace google_breakpad {
|
||||
|
||||
using std::vector;
|
||||
using std::wstring;
|
||||
|
||||
// These entries store a list of memory regions that the client wants included
|
||||
// in the minidump.
|
||||
struct AppMemory {
|
||||
ULONG64 ptr;
|
||||
ULONG length;
|
||||
|
||||
bool operator==(const struct AppMemory& other) const {
|
||||
return ptr == other.ptr;
|
||||
}
|
||||
|
||||
bool operator==(const void* other) const {
|
||||
return ptr == reinterpret_cast<ULONG64>(other);
|
||||
}
|
||||
};
|
||||
typedef std::list<AppMemory> AppMemoryList;
|
||||
|
||||
class ExceptionHandler {
|
||||
public:
|
||||
// A callback function to run before Breakpad performs any substantial
|
||||
// processing of an exception. A FilterCallback is called before writing
|
||||
// a minidump. context is the parameter supplied by the user as
|
||||
// callback_context when the handler was created. exinfo points to the
|
||||
// exception record, if any; assertion points to assertion information,
|
||||
// if any.
|
||||
//
|
||||
// If a FilterCallback returns true, Breakpad will continue processing,
|
||||
// attempting to write a minidump. If a FilterCallback returns false,
|
||||
// Breakpad will immediately report the exception as unhandled without
|
||||
// writing a minidump, allowing another handler the opportunity to handle it.
|
||||
typedef bool (*FilterCallback)(void* context, EXCEPTION_POINTERS* exinfo,
|
||||
MDRawAssertionInfo* assertion);
|
||||
|
||||
// A callback function to run after the minidump has been written.
|
||||
// minidump_id is a unique id for the dump, so the minidump
|
||||
// file is <dump_path>\<minidump_id>.dmp. context is the parameter supplied
|
||||
// by the user as callback_context when the handler was created. exinfo
|
||||
// points to the exception record, or NULL if no exception occurred.
|
||||
// succeeded indicates whether a minidump file was successfully written.
|
||||
// assertion points to information about an assertion if the handler was
|
||||
// invoked by an assertion.
|
||||
//
|
||||
// If an exception occurred and the callback returns true, Breakpad will treat
|
||||
// the exception as fully-handled, suppressing any other handlers from being
|
||||
// notified of the exception. If the callback returns false, Breakpad will
|
||||
// treat the exception as unhandled, and allow another handler to handle it.
|
||||
// If there are no other handlers, Breakpad will report the exception to the
|
||||
// system as unhandled, allowing a debugger or native crash dialog the
|
||||
// opportunity to handle the exception. Most callback implementations
|
||||
// should normally return the value of |succeeded|, or when they wish to
|
||||
// not report an exception as handled, false. Callbacks will rarely want to
|
||||
// return true directly (unless |succeeded| is true).
|
||||
//
|
||||
// For out-of-process dump generation, dump path and minidump ID will always
|
||||
// be NULL. In case of out-of-process dump generation, the dump path and
|
||||
// minidump id are controlled by the server process and are not communicated
|
||||
// back to the crashing process.
|
||||
typedef bool (*MinidumpCallback)(const wchar_t* dump_path,
|
||||
const wchar_t* minidump_id,
|
||||
void* context,
|
||||
EXCEPTION_POINTERS* exinfo,
|
||||
MDRawAssertionInfo* assertion,
|
||||
bool succeeded);
|
||||
|
||||
// HandlerType specifies which types of handlers should be installed, if
|
||||
// any. Use HANDLER_NONE for an ExceptionHandler that remains idle,
|
||||
// without catching any failures on its own. This type of handler may
|
||||
// still be triggered by calling WriteMinidump. Otherwise, use a
|
||||
// combination of the other HANDLER_ values, or HANDLER_ALL to install
|
||||
// all handlers.
|
||||
enum HandlerType {
|
||||
HANDLER_NONE = 0,
|
||||
HANDLER_EXCEPTION = 1 << 0, // SetUnhandledExceptionFilter
|
||||
HANDLER_INVALID_PARAMETER = 1 << 1, // _set_invalid_parameter_handler
|
||||
HANDLER_PURECALL = 1 << 2, // _set_purecall_handler
|
||||
HANDLER_ALL = HANDLER_EXCEPTION |
|
||||
HANDLER_INVALID_PARAMETER |
|
||||
HANDLER_PURECALL
|
||||
};
|
||||
|
||||
// Creates a new ExceptionHandler instance to handle writing minidumps.
|
||||
// Before writing a minidump, the optional filter callback will be called.
|
||||
// Its return value determines whether or not Breakpad should write a
|
||||
// minidump. Minidump files will be written to dump_path, and the optional
|
||||
// callback is called after writing the dump file, as described above.
|
||||
// handler_types specifies the types of handlers that should be installed.
|
||||
ExceptionHandler(const wstring& dump_path,
|
||||
FilterCallback filter,
|
||||
MinidumpCallback callback,
|
||||
void* callback_context,
|
||||
int handler_types);
|
||||
|
||||
// Creates a new ExceptionHandler instance that can attempt to perform
|
||||
// out-of-process dump generation if pipe_name is not NULL. If pipe_name is
|
||||
// NULL, or if out-of-process dump generation registration step fails,
|
||||
// in-process dump generation will be used. This also allows specifying
|
||||
// the dump type to generate.
|
||||
ExceptionHandler(const wstring& dump_path,
|
||||
FilterCallback filter,
|
||||
MinidumpCallback callback,
|
||||
void* callback_context,
|
||||
int handler_types,
|
||||
MINIDUMP_TYPE dump_type,
|
||||
const wchar_t* pipe_name,
|
||||
const CustomClientInfo* custom_info);
|
||||
|
||||
// As above, creates a new ExceptionHandler instance to perform
|
||||
// out-of-process dump generation if the given pipe_handle is not NULL.
|
||||
ExceptionHandler(const wstring& dump_path,
|
||||
FilterCallback filter,
|
||||
MinidumpCallback callback,
|
||||
void* callback_context,
|
||||
int handler_types,
|
||||
MINIDUMP_TYPE dump_type,
|
||||
HANDLE pipe_handle,
|
||||
const CustomClientInfo* custom_info);
|
||||
|
||||
// ExceptionHandler that ENSURES out-of-process dump generation. Expects a
|
||||
// crash generation client that is already registered with a crash generation
|
||||
// server. Takes ownership of the passed-in crash_generation_client.
|
||||
//
|
||||
// Usage example:
|
||||
// crash_generation_client = new CrashGenerationClient(..);
|
||||
// if (crash_generation_client->Register()) {
|
||||
// // Registration with the crash generation server succeeded.
|
||||
// // Out-of-process dump generation is guaranteed.
|
||||
// g_handler = new ExceptionHandler(.., crash_generation_client, ..);
|
||||
// return true;
|
||||
// }
|
||||
ExceptionHandler(const wstring& dump_path,
|
||||
FilterCallback filter,
|
||||
MinidumpCallback callback,
|
||||
void* callback_context,
|
||||
int handler_types,
|
||||
MINIDUMP_TYPE dump_type,
|
||||
CrashGenerationClient* crash_generation_client,
|
||||
const CustomClientInfo* custom_info);
|
||||
|
||||
~ExceptionHandler();
|
||||
|
||||
// Get and set the minidump path.
|
||||
wstring dump_path() const { return dump_path_; }
|
||||
void set_dump_path(const wstring &dump_path) {
|
||||
dump_path_ = dump_path;
|
||||
dump_path_c_ = dump_path_.c_str();
|
||||
UpdateNextID(); // Necessary to put dump_path_ in next_minidump_path_.
|
||||
}
|
||||
|
||||
// Requests that a previously reported crash be uploaded.
|
||||
bool RequestUpload(DWORD crash_id);
|
||||
|
||||
// Writes a minidump immediately. This can be used to capture the
|
||||
// execution state independently of a crash. Returns true on success.
|
||||
bool WriteMinidump();
|
||||
|
||||
// Writes a minidump immediately, with the user-supplied exception
|
||||
// information.
|
||||
bool WriteMinidumpForException(EXCEPTION_POINTERS* exinfo);
|
||||
|
||||
// Convenience form of WriteMinidump which does not require an
|
||||
// ExceptionHandler instance.
|
||||
static bool WriteMinidump(const wstring &dump_path,
|
||||
MinidumpCallback callback, void* callback_context);
|
||||
|
||||
// Write a minidump of |child| immediately. This can be used to
|
||||
// capture the execution state of |child| independently of a crash.
|
||||
// Pass a meaningful |child_blamed_thread| to make that thread in
|
||||
// the child process the one from which a crash signature is
|
||||
// extracted.
|
||||
static bool WriteMinidumpForChild(HANDLE child,
|
||||
DWORD child_blamed_thread,
|
||||
const wstring& dump_path,
|
||||
MinidumpCallback callback,
|
||||
void* callback_context);
|
||||
|
||||
// Get the thread ID of the thread requesting the dump (either the exception
|
||||
// thread or any other thread that called WriteMinidump directly). This
|
||||
// may be useful if you want to include additional thread state in your
|
||||
// dumps.
|
||||
DWORD get_requesting_thread_id() const { return requesting_thread_id_; }
|
||||
|
||||
// Controls behavior of EXCEPTION_BREAKPOINT and EXCEPTION_SINGLE_STEP.
|
||||
bool get_handle_debug_exceptions() const { return handle_debug_exceptions_; }
|
||||
void set_handle_debug_exceptions(bool handle_debug_exceptions) {
|
||||
handle_debug_exceptions_ = handle_debug_exceptions;
|
||||
}
|
||||
|
||||
// Returns whether out-of-process dump generation is used or not.
|
||||
bool IsOutOfProcess() const { return crash_generation_client_.get() != NULL; }
|
||||
|
||||
// Calling RegisterAppMemory(p, len) causes len bytes starting
|
||||
// at address p to be copied to the minidump when a crash happens.
|
||||
void RegisterAppMemory(void* ptr, size_t length);
|
||||
void UnregisterAppMemory(void* ptr);
|
||||
|
||||
private:
|
||||
friend class AutoExceptionHandler;
|
||||
|
||||
// Initializes the instance with given values.
|
||||
void Initialize(const wstring& dump_path,
|
||||
FilterCallback filter,
|
||||
MinidumpCallback callback,
|
||||
void* callback_context,
|
||||
int handler_types,
|
||||
MINIDUMP_TYPE dump_type,
|
||||
const wchar_t* pipe_name,
|
||||
HANDLE pipe_handle,
|
||||
CrashGenerationClient* crash_generation_client,
|
||||
const CustomClientInfo* custom_info);
|
||||
|
||||
// Function pointer type for MiniDumpWriteDump, which is looked up
|
||||
// dynamically.
|
||||
typedef BOOL (WINAPI *MiniDumpWriteDump_type)(
|
||||
HANDLE hProcess,
|
||||
DWORD dwPid,
|
||||
HANDLE hFile,
|
||||
MINIDUMP_TYPE DumpType,
|
||||
CONST PMINIDUMP_EXCEPTION_INFORMATION ExceptionParam,
|
||||
CONST PMINIDUMP_USER_STREAM_INFORMATION UserStreamParam,
|
||||
CONST PMINIDUMP_CALLBACK_INFORMATION CallbackParam);
|
||||
|
||||
// Function pointer type for UuidCreate, which is looked up dynamically.
|
||||
typedef RPC_STATUS (RPC_ENTRY *UuidCreate_type)(UUID* Uuid);
|
||||
|
||||
// Runs the main loop for the exception handler thread.
|
||||
static DWORD WINAPI ExceptionHandlerThreadMain(void* lpParameter);
|
||||
|
||||
// Called on the exception thread when an unhandled exception occurs.
|
||||
// Signals the exception handler thread to handle the exception.
|
||||
static LONG WINAPI HandleException(EXCEPTION_POINTERS* exinfo);
|
||||
|
||||
#if _MSC_VER >= 1400 // MSVC 2005/8
|
||||
// This function will be called by some CRT functions when they detect
|
||||
// that they were passed an invalid parameter. Note that in _DEBUG builds,
|
||||
// the CRT may display an assertion dialog before calling this function,
|
||||
// and the function will not be called unless the assertion dialog is
|
||||
// dismissed by clicking "Ignore."
|
||||
static void HandleInvalidParameter(const wchar_t* expression,
|
||||
const wchar_t* function,
|
||||
const wchar_t* file,
|
||||
unsigned int line,
|
||||
uintptr_t reserved);
|
||||
#endif // _MSC_VER >= 1400
|
||||
|
||||
// This function will be called by the CRT when a pure virtual
|
||||
// function is called.
|
||||
static void HandlePureVirtualCall();
|
||||
|
||||
// This is called on the exception thread or on another thread that
|
||||
// the user wishes to produce a dump from. It calls
|
||||
// WriteMinidumpWithException on the handler thread, avoiding stack
|
||||
// overflows and inconsistent dumps due to writing the dump from
|
||||
// the exception thread. If the dump is requested as a result of an
|
||||
// exception, exinfo contains exception information, otherwise, it
|
||||
// is NULL. If the dump is requested as a result of an assertion
|
||||
// (such as an invalid parameter being passed to a CRT function),
|
||||
// assertion contains data about the assertion, otherwise, it is NULL.
|
||||
bool WriteMinidumpOnHandlerThread(EXCEPTION_POINTERS* exinfo,
|
||||
MDRawAssertionInfo* assertion);
|
||||
|
||||
// This function is called on the handler thread. It calls into
|
||||
// WriteMinidumpWithExceptionForProcess() with a handle to the
|
||||
// current process. requesting_thread_id is the ID of the thread
|
||||
// that requested the dump. If the dump is requested as a result of
|
||||
// an exception, exinfo contains exception information, otherwise,
|
||||
// it is NULL.
|
||||
bool WriteMinidumpWithException(DWORD requesting_thread_id,
|
||||
EXCEPTION_POINTERS* exinfo,
|
||||
MDRawAssertionInfo* assertion);
|
||||
|
||||
// This function is used as a callback when calling MinidumpWriteDump,
|
||||
// in order to add additional memory regions to the dump.
|
||||
static BOOL CALLBACK MinidumpWriteDumpCallback(
|
||||
PVOID context,
|
||||
const PMINIDUMP_CALLBACK_INPUT callback_input,
|
||||
PMINIDUMP_CALLBACK_OUTPUT callback_output);
|
||||
|
||||
// This function does the actual writing of a minidump. It is
|
||||
// called on the handler thread. requesting_thread_id is the ID of
|
||||
// the thread that requested the dump, if that information is
|
||||
// meaningful. If the dump is requested as a result of an
|
||||
// exception, exinfo contains exception information, otherwise, it
|
||||
// is NULL. process is the one that will be dumped. If
|
||||
// requesting_thread_id is meaningful and should be added to the
|
||||
// minidump, write_requester_stream is |true|.
|
||||
bool WriteMinidumpWithExceptionForProcess(DWORD requesting_thread_id,
|
||||
EXCEPTION_POINTERS* exinfo,
|
||||
MDRawAssertionInfo* assertion,
|
||||
HANDLE process,
|
||||
bool write_requester_stream);
|
||||
|
||||
// Generates a new ID and stores it in next_minidump_id_, and stores the
|
||||
// path of the next minidump to be written in next_minidump_path_.
|
||||
void UpdateNextID();
|
||||
|
||||
FilterCallback filter_;
|
||||
MinidumpCallback callback_;
|
||||
void* callback_context_;
|
||||
|
||||
scoped_ptr<CrashGenerationClient> crash_generation_client_;
|
||||
|
||||
// The directory in which a minidump will be written, set by the dump_path
|
||||
// argument to the constructor, or set_dump_path.
|
||||
wstring dump_path_;
|
||||
|
||||
// The basename of the next minidump to be written, without the extension.
|
||||
wstring next_minidump_id_;
|
||||
|
||||
// The full pathname of the next minidump to be written, including the file
|
||||
// extension.
|
||||
wstring next_minidump_path_;
|
||||
|
||||
// Pointers to C-string representations of the above. These are set when
|
||||
// the above wstring versions are set in order to avoid calling c_str during
|
||||
// an exception, as c_str may attempt to allocate heap memory. These
|
||||
// pointers are not owned by the ExceptionHandler object, but their lifetimes
|
||||
// should be equivalent to the lifetimes of the associated wstring, provided
|
||||
// that the wstrings are not altered.
|
||||
const wchar_t* dump_path_c_;
|
||||
const wchar_t* next_minidump_id_c_;
|
||||
const wchar_t* next_minidump_path_c_;
|
||||
|
||||
HMODULE dbghelp_module_;
|
||||
MiniDumpWriteDump_type minidump_write_dump_;
|
||||
MINIDUMP_TYPE dump_type_;
|
||||
|
||||
HMODULE rpcrt4_module_;
|
||||
UuidCreate_type uuid_create_;
|
||||
|
||||
// Tracks the handler types that were installed according to the
|
||||
// handler_types constructor argument.
|
||||
int handler_types_;
|
||||
|
||||
// When installed_handler_ is true, previous_filter_ is the unhandled
|
||||
// exception filter that was set prior to installing ExceptionHandler as
|
||||
// the unhandled exception filter and pointing it to |this|. NULL indicates
|
||||
// that there is no previous unhandled exception filter.
|
||||
LPTOP_LEVEL_EXCEPTION_FILTER previous_filter_;
|
||||
|
||||
#if _MSC_VER >= 1400 // MSVC 2005/8
|
||||
// Beginning in VC 8, the CRT provides an invalid parameter handler that will
|
||||
// be called when some CRT functions are passed invalid parameters. In
|
||||
// earlier CRTs, the same conditions would cause unexpected behavior or
|
||||
// crashes.
|
||||
_invalid_parameter_handler previous_iph_;
|
||||
#endif // _MSC_VER >= 1400
|
||||
|
||||
// The CRT allows you to override the default handler for pure
|
||||
// virtual function calls.
|
||||
_purecall_handler previous_pch_;
|
||||
|
||||
// The exception handler thread.
|
||||
HANDLE handler_thread_;
|
||||
|
||||
// True if the exception handler is being destroyed.
|
||||
// Starting with MSVC 2005, Visual C has stronger guarantees on volatile vars.
|
||||
// It has release semantics on write and acquire semantics on reads.
|
||||
// See the MSDN documentation.
|
||||
volatile bool is_shutdown_;
|
||||
|
||||
// The critical section enforcing the requirement that only one exception be
|
||||
// handled by a handler at a time.
|
||||
CRITICAL_SECTION handler_critical_section_;
|
||||
|
||||
// Semaphores used to move exception handling between the exception thread
|
||||
// and the handler thread. handler_start_semaphore_ is signalled by the
|
||||
// exception thread to wake up the handler thread when an exception occurs.
|
||||
// handler_finish_semaphore_ is signalled by the handler thread to wake up
|
||||
// the exception thread when handling is complete.
|
||||
HANDLE handler_start_semaphore_;
|
||||
HANDLE handler_finish_semaphore_;
|
||||
|
||||
// The next 2 fields contain data passed from the requesting thread to
|
||||
// the handler thread.
|
||||
|
||||
// The thread ID of the thread requesting the dump (either the exception
|
||||
// thread or any other thread that called WriteMinidump directly).
|
||||
DWORD requesting_thread_id_;
|
||||
|
||||
// The exception info passed to the exception handler on the exception
|
||||
// thread, if an exception occurred. NULL for user-requested dumps.
|
||||
EXCEPTION_POINTERS* exception_info_;
|
||||
|
||||
// If the handler is invoked due to an assertion, this will contain a
|
||||
// pointer to the assertion information. It is NULL at other times.
|
||||
MDRawAssertionInfo* assertion_;
|
||||
|
||||
// The return value of the handler, passed from the handler thread back to
|
||||
// the requesting thread.
|
||||
bool handler_return_value_;
|
||||
|
||||
// If true, the handler will intercept EXCEPTION_BREAKPOINT and
|
||||
// EXCEPTION_SINGLE_STEP exceptions. Leave this false (the default)
|
||||
// to not interfere with debuggers.
|
||||
bool handle_debug_exceptions_;
|
||||
|
||||
// Callers can request additional memory regions to be included in
|
||||
// the dump.
|
||||
AppMemoryList app_memory_info_;
|
||||
|
||||
// A stack of ExceptionHandler objects that have installed unhandled
|
||||
// exception filters. This vector is used by HandleException to determine
|
||||
// which ExceptionHandler object to route an exception to. When an
|
||||
// ExceptionHandler is created with install_handler true, it will append
|
||||
// itself to this list.
|
||||
static vector<ExceptionHandler*>* handler_stack_;
|
||||
|
||||
// The index of the ExceptionHandler in handler_stack_ that will handle the
|
||||
// next exception. Note that 0 means the last entry in handler_stack_, 1
|
||||
// means the next-to-last entry, and so on. This is used by HandleException
|
||||
// to support multiple stacked Breakpad handlers.
|
||||
static LONG handler_stack_index_;
|
||||
|
||||
// handler_stack_critical_section_ guards operations on handler_stack_ and
|
||||
// handler_stack_index_. The critical section is initialized by the
|
||||
// first instance of the class and destroyed by the last instance of it.
|
||||
static CRITICAL_SECTION handler_stack_critical_section_;
|
||||
|
||||
// The number of instances of this class.
|
||||
volatile static LONG instance_count_;
|
||||
|
||||
// disallow copy ctor and operator=
|
||||
explicit ExceptionHandler(const ExceptionHandler &);
|
||||
void operator=(const ExceptionHandler &);
|
||||
};
|
||||
|
||||
} // namespace google_breakpad
|
||||
|
||||
#pragma warning( pop )
|
||||
|
||||
#endif // CLIENT_WINDOWS_HANDLER_EXCEPTION_HANDLER_H__
|
1
google-breakpad/src/tools/gyp/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
*.pyc
|
6
google-breakpad/src/tools/gyp/AUTHORS
Normal file
@ -0,0 +1,6 @@
|
||||
# Names should be added to this file like so:
|
||||
# Name or Organization <email address>
|
||||
|
||||
Google Inc.
|
||||
Steven Knight <knight@baldmt.com>
|
||||
Ryan Norton <rnorton10@gmail.com>
|
26
google-breakpad/src/tools/gyp/DEPS
Normal file
@ -0,0 +1,26 @@
|
||||
# DEPS file for gclient use in buildbot execution of gyp tests.
|
||||
#
|
||||
# (You don't need to use gclient for normal GYP development work.)
|
||||
|
||||
vars = {
|
||||
"chrome_trunk": "http://src.chromium.org/svn/trunk",
|
||||
"googlecode_url": "http://%s.googlecode.com/svn",
|
||||
}
|
||||
|
||||
deps = {
|
||||
"scons":
|
||||
Var("chrome_trunk") + "/src/third_party/scons@44099",
|
||||
}
|
||||
|
||||
deps_os = {
|
||||
"win": {
|
||||
"third_party/cygwin":
|
||||
Var("chrome_trunk") + "/deps/third_party/cygwin@66844",
|
||||
|
||||
"third_party/python_26":
|
||||
Var("chrome_trunk") + "/tools/third_party/python_26@89111",
|
||||
|
||||
"src/third_party/pefile":
|
||||
(Var("googlecode_url") % "pefile") + "/trunk@63",
|
||||
},
|
||||
}
|
27
google-breakpad/src/tools/gyp/LICENSE
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009 Google Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
21
google-breakpad/src/tools/gyp/MANIFEST
Normal file
@ -0,0 +1,21 @@
|
||||
setup.py
|
||||
gyp
|
||||
LICENSE
|
||||
AUTHORS
|
||||
pylib/gyp/MSVSNew.py
|
||||
pylib/gyp/MSVSProject.py
|
||||
pylib/gyp/MSVSToolFile.py
|
||||
pylib/gyp/MSVSUserFile.py
|
||||
pylib/gyp/MSVSVersion.py
|
||||
pylib/gyp/SCons.py
|
||||
pylib/gyp/__init__.py
|
||||
pylib/gyp/common.py
|
||||
pylib/gyp/input.py
|
||||
pylib/gyp/xcodeproj_file.py
|
||||
pylib/gyp/generator/__init__.py
|
||||
pylib/gyp/generator/gypd.py
|
||||
pylib/gyp/generator/gypsh.py
|
||||
pylib/gyp/generator/make.py
|
||||
pylib/gyp/generator/msvs.py
|
||||
pylib/gyp/generator/scons.py
|
||||
pylib/gyp/generator/xcode.py
|
1
google-breakpad/src/tools/gyp/OWNERS
Normal file
@ -0,0 +1 @@
|
||||
*
|
109
google-breakpad/src/tools/gyp/PRESUBMIT.py
Normal file
@ -0,0 +1,109 @@
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
|
||||
"""Top-level presubmit script for GYP.
|
||||
|
||||
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
|
||||
for more details about the presubmit API built into gcl.
|
||||
"""
|
||||
|
||||
|
||||
PYLINT_BLACKLIST = [
|
||||
# TODO: fix me.
|
||||
# From SCons, not done in google style.
|
||||
'test/lib/TestCmd.py',
|
||||
'test/lib/TestCommon.py',
|
||||
'test/lib/TestGyp.py',
|
||||
# Needs style fix.
|
||||
'pylib/gyp/generator/scons.py',
|
||||
'pylib/gyp/generator/xcode.py',
|
||||
]
|
||||
|
||||
|
||||
PYLINT_DISABLED_WARNINGS = [
|
||||
# TODO: fix me.
|
||||
# Many tests include modules they don't use.
|
||||
'W0611',
|
||||
# Include order doesn't properly include local files?
|
||||
'F0401',
|
||||
# Some use of built-in names.
|
||||
'W0622',
|
||||
# Some unused variables.
|
||||
'W0612',
|
||||
# Operator not preceded/followed by space.
|
||||
'C0323',
|
||||
'C0322',
|
||||
# Unnecessary semicolon.
|
||||
'W0301',
|
||||
# Unused argument.
|
||||
'W0613',
|
||||
# String has no effect (docstring in wrong place).
|
||||
'W0105',
|
||||
# Comma not followed by space.
|
||||
'C0324',
|
||||
# Access to a protected member.
|
||||
'W0212',
|
||||
# Bad indent.
|
||||
'W0311',
|
||||
# Line too long.
|
||||
'C0301',
|
||||
# Undefined variable.
|
||||
'E0602',
|
||||
# No exception type specified.
|
||||
'W0702',
|
||||
# No member of that name.
|
||||
'E1101',
|
||||
# Dangerous default {}.
|
||||
'W0102',
|
||||
# Others, too many to sort.
|
||||
'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
|
||||
'R0201', 'E0101', 'C0321',
|
||||
# ************* Module copy
|
||||
# W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
|
||||
'W0104',
|
||||
]
|
||||
|
||||
|
||||
def CheckChangeOnUpload(input_api, output_api):
|
||||
report = []
|
||||
report.extend(input_api.canned_checks.PanProjectChecks(
|
||||
input_api, output_api))
|
||||
return report
|
||||
|
||||
|
||||
def CheckChangeOnCommit(input_api, output_api):
|
||||
report = []
|
||||
license = (
|
||||
r'.*? Copyright \(c\) %(year)s Google Inc\. All rights reserved\.\n'
|
||||
r'.*? Use of this source code is governed by a BSD-style license that '
|
||||
r'can be\n'
|
||||
r'.*? found in the LICENSE file\.\n'
|
||||
) % {
|
||||
'year': input_api.time.strftime('%Y'),
|
||||
}
|
||||
|
||||
report.extend(input_api.canned_checks.PanProjectChecks(
|
||||
input_api, output_api, license_header=license))
|
||||
report.extend(input_api.canned_checks.CheckTreeIsOpen(
|
||||
input_api, output_api,
|
||||
'http://gyp-status.appspot.com/status',
|
||||
'http://gyp-status.appspot.com/current'))
|
||||
|
||||
import sys
|
||||
old_sys_path = sys.path
|
||||
try:
|
||||
sys.path = ['pylib', 'test/lib'] + sys.path
|
||||
report.extend(input_api.canned_checks.RunPylint(
|
||||
input_api,
|
||||
output_api,
|
||||
black_list=PYLINT_BLACKLIST,
|
||||
disabled_warnings=PYLINT_DISABLED_WARNINGS))
|
||||
finally:
|
||||
sys.path = old_sys_path
|
||||
return report
|
||||
|
||||
|
||||
def GetPreferredTrySlaves():
|
||||
return ['gyp-win32', 'gyp-win64', 'gyp-linux', 'gyp-mac']
|
98
google-breakpad/src/tools/gyp/buildbot/buildbot_run.py
Normal file
@ -0,0 +1,98 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
|
||||
"""Argument-less script to select what to run on the buildbots."""
|
||||
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
if sys.platform in ['win32', 'cygwin']:
|
||||
EXE_SUFFIX = '.exe'
|
||||
else:
|
||||
EXE_SUFFIX = ''
|
||||
|
||||
|
||||
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
|
||||
ROOT_DIR = os.path.dirname(TRUNK_DIR)
|
||||
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
|
||||
|
||||
|
||||
def GypTestFormat(title, format=None, msvs_version=None):
|
||||
"""Run the gyp tests for a given format, emitting annotator tags.
|
||||
|
||||
See annotator docs at:
|
||||
https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
|
||||
Args:
|
||||
format: gyp format to test.
|
||||
Returns:
|
||||
0 for success, 1 for failure.
|
||||
"""
|
||||
if not format:
|
||||
format = title
|
||||
|
||||
print '@@@BUILD_STEP ' + title + '@@@'
|
||||
sys.stdout.flush()
|
||||
env = os.environ.copy()
|
||||
# TODO(bradnelson): remove this when this issue is resolved:
|
||||
# http://code.google.com/p/chromium/issues/detail?id=108251
|
||||
if format == 'ninja':
|
||||
env['NOGOLD'] = '1'
|
||||
if msvs_version:
|
||||
env['GYP_MSVS_VERSION'] = msvs_version
|
||||
retcode = subprocess.call(' '.join(
|
||||
[sys.executable, 'trunk/gyptest.py',
|
||||
'--all',
|
||||
'--passed',
|
||||
'--format', format,
|
||||
'--chdir', 'trunk',
|
||||
'--path', '../scons']),
|
||||
cwd=ROOT_DIR, env=env, shell=True)
|
||||
if retcode:
|
||||
# Emit failure tag, and keep going.
|
||||
print '@@@STEP_FAILURE@@@'
|
||||
return 1
|
||||
return 0
|
||||
|
||||
|
||||
def GypBuild():
|
||||
# Dump out/ directory.
|
||||
print '@@@BUILD_STEP cleanup@@@'
|
||||
print 'Removing %s...' % OUT_DIR
|
||||
shutil.rmtree(OUT_DIR, ignore_errors=True)
|
||||
print 'Done.'
|
||||
|
||||
retcode = 0
|
||||
if sys.platform.startswith('linux'):
|
||||
retcode += GypTestFormat('ninja')
|
||||
retcode += GypTestFormat('scons')
|
||||
retcode += GypTestFormat('make')
|
||||
elif sys.platform == 'darwin':
|
||||
retcode += GypTestFormat('ninja')
|
||||
retcode += GypTestFormat('xcode')
|
||||
retcode += GypTestFormat('make')
|
||||
elif sys.platform == 'win32':
|
||||
retcode += GypTestFormat('ninja')
|
||||
retcode += GypTestFormat('msvs-2008', format='msvs', msvs_version='2008')
|
||||
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
|
||||
retcode += GypTestFormat('msvs-2010', format='msvs', msvs_version='2010')
|
||||
else:
|
||||
raise Exception('Unknown platform')
|
||||
if retcode:
|
||||
# TODO(bradnelson): once the annotator supports a postscript (section for
|
||||
# after the build proper that could be used for cumulative failures),
|
||||
# use that instead of this. This isolates the final return value so
|
||||
# that it isn't misattributed to the last stage.
|
||||
print '@@@BUILD_STEP failures@@@'
|
||||
sys.exit(retcode)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
GypBuild()
|
10
google-breakpad/src/tools/gyp/codereview.settings
Normal file
@ -0,0 +1,10 @@
|
||||
# This file is used by gcl to get repository specific information.
|
||||
CODE_REVIEW_SERVER: codereview.chromium.org
|
||||
CC_LIST: gyp-developer@googlegroups.com
|
||||
VIEW_VC: http://code.google.com/p/gyp/source/detail?r=
|
||||
TRY_ON_UPLOAD: True
|
||||
TRYSERVER_PROJECT: gyp
|
||||
TRYSERVER_PATCHLEVEL: 0
|
||||
TRYSERVER_ROOT: trunk
|
||||
TRYSERVER_SVN_URL: svn://svn.chromium.org/chrome-try/try-nacl
|
||||
|
18
google-breakpad/src/tools/gyp/gyp
Normal file
@ -0,0 +1,18 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright (c) 2009 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
import sys
|
||||
|
||||
# TODO(mark): sys.path manipulation is some temporary testing stuff.
|
||||
try:
|
||||
import gyp
|
||||
except ImportError, e:
|
||||
import os.path
|
||||
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), 'pylib'))
|
||||
import gyp
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(gyp.main(sys.argv[1:]))
|
5
google-breakpad/src/tools/gyp/gyp.bat
Normal file
@ -0,0 +1,5 @@
|
||||
@rem Copyright (c) 2009 Google Inc. All rights reserved.
|
||||
@rem Use of this source code is governed by a BSD-style license that can be
|
||||
@rem found in the LICENSE file.
|
||||
|
||||
@python "%~dp0/gyp" %*
|
7
google-breakpad/src/tools/gyp/gyp_dummy.c
Normal file
@ -0,0 +1,7 @@
|
||||
/* Copyright (c) 2009 Google Inc. All rights reserved.
|
||||
* Use of this source code is governed by a BSD-style license that can be
|
||||
* found in the LICENSE file. */
|
||||
|
||||
int main() {
|
||||
return 0;
|
||||
}
|
264
google-breakpad/src/tools/gyp/gyptest.py
Normal file
@ -0,0 +1,264 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
__doc__ = """
|
||||
gyptest.py -- test runner for GYP tests.
|
||||
"""
|
||||
|
||||
import os
|
||||
import optparse
|
||||
import shlex  # used by CommandRunner.execute() when given a command string
import subprocess
|
||||
import sys
|
||||
|
||||
class CommandRunner:
|
||||
"""
|
||||
Executor class for commands, including "commands" implemented by
|
||||
Python functions.
|
||||
"""
|
||||
verbose = True
|
||||
active = True
|
||||
|
||||
def __init__(self, dictionary={}):
|
||||
self.subst_dictionary(dictionary)
|
||||
|
||||
def subst_dictionary(self, dictionary):
|
||||
self._subst_dictionary = dictionary
|
||||
|
||||
def subst(self, string, dictionary=None):
|
||||
"""
|
||||
Substitutes (via the format operator) the values in the specified
|
||||
dictionary into the specified command.
|
||||
|
||||
The command can be an (action, string) tuple. In all cases, we
|
||||
perform substitution on strings and don't worry if something isn't
|
||||
a string. (It's probably a Python function to be executed.)
|
||||
"""
|
||||
if dictionary is None:
|
||||
dictionary = self._subst_dictionary
|
||||
if dictionary:
|
||||
try:
|
||||
string = string % dictionary
|
||||
except TypeError:
|
||||
pass
|
||||
return string
|
||||
|
||||
def display(self, command, stdout=None, stderr=None):
|
||||
if not self.verbose:
|
||||
return
|
||||
if type(command) == type(()):
|
||||
func = command[0]
|
||||
args = command[1:]
|
||||
s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
|
||||
if type(command) == type([]):
|
||||
# TODO: quote arguments containing spaces
|
||||
# TODO: handle meta characters?
|
||||
s = ' '.join(command)
|
||||
else:
|
||||
s = self.subst(command)
|
||||
if not s.endswith('\n'):
|
||||
s += '\n'
|
||||
sys.stdout.write(s)
|
||||
sys.stdout.flush()
|
||||
|
||||
def execute(self, command, stdout=None, stderr=None):
|
||||
"""
|
||||
Executes a single command.
|
||||
"""
|
||||
if not self.active:
|
||||
return 0
|
||||
if type(command) == type(''):
|
||||
command = self.subst(command)
|
||||
cmdargs = shlex.split(command)
|
||||
if cmdargs[0] == 'cd':
|
||||
command = (os.chdir,) + tuple(cmdargs[1:])
|
||||
if type(command) == type(()):
|
||||
func = command[0]
|
||||
args = command[1:]
|
||||
return func(*args)
|
||||
else:
|
||||
if stdout is sys.stdout:
|
||||
# Same as passing sys.stdout, except python2.4 doesn't fail on it.
|
||||
subout = None
|
||||
else:
|
||||
# Open pipe for anything else so Popen works on python2.4.
|
||||
subout = subprocess.PIPE
|
||||
if stderr is sys.stderr:
|
||||
# Same as passing sys.stderr, except python2.4 doesn't fail on it.
|
||||
suberr = None
|
||||
elif stderr is None:
|
||||
# Merge with stdout if stderr isn't specified.
|
||||
suberr = subprocess.STDOUT
|
||||
else:
|
||||
# Open pipe for anything else so Popen works on python2.4.
|
||||
suberr = subprocess.PIPE
|
||||
p = subprocess.Popen(command,
|
||||
shell=(sys.platform == 'win32'),
|
||||
stdout=subout,
|
||||
stderr=suberr)
|
||||
p.wait()
|
||||
if stdout is None:
|
||||
self.stdout = p.stdout.read()
|
||||
elif stdout is not sys.stdout:
|
||||
stdout.write(p.stdout.read())
|
||||
if stderr not in (None, sys.stderr):
|
||||
stderr.write(p.stderr.read())
|
||||
return p.returncode
|
||||
|
||||
def run(self, command, display=None, stdout=None, stderr=None):
|
||||
"""
|
||||
Runs a single command, displaying it first.
|
||||
"""
|
||||
if display is None:
|
||||
display = command
|
||||
self.display(display)
|
||||
return self.execute(command, stdout, stderr)
|
||||
|
||||
|
||||
class Unbuffered:
|
||||
def __init__(self, fp):
|
||||
self.fp = fp
|
||||
def write(self, arg):
|
||||
self.fp.write(arg)
|
||||
self.fp.flush()
|
||||
def __getattr__(self, attr):
|
||||
return getattr(self.fp, attr)
|
||||
|
||||
sys.stdout = Unbuffered(sys.stdout)
|
||||
sys.stderr = Unbuffered(sys.stderr)
|
||||
|
||||
|
||||
def find_all_gyptest_files(directory):
|
||||
result = []
|
||||
for root, dirs, files in os.walk(directory):
|
||||
if '.svn' in dirs:
|
||||
dirs.remove('.svn')
|
||||
result.extend([ os.path.join(root, f) for f in files
|
||||
if f.startswith('gyptest') and f.endswith('.py') ])
|
||||
result.sort()
|
||||
return result
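# Illustration (hypothetical paths, not from the gyp sources): calling
#   find_all_gyptest_files('test')
# would return something like
#   ['test/actions/gyptest-all.py', 'test/hello/gyptest-all.py', ...]
# i.e. every gyptest*.py below the directory, sorted, with .svn dirs skipped.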
|
||||
|
||||
|
||||
def main(argv=None):
|
||||
if argv is None:
|
||||
argv = sys.argv
|
||||
|
||||
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
|
||||
parser = optparse.OptionParser(usage=usage)
|
||||
parser.add_option("-a", "--all", action="store_true",
|
||||
help="run all tests")
|
||||
parser.add_option("-C", "--chdir", action="store", default=None,
|
||||
help="chdir to the specified directory")
|
||||
parser.add_option("-f", "--format", action="store", default='',
|
||||
help="run tests with the specified formats")
|
||||
parser.add_option("-G", '--gyp_option', action="append", default=[],
|
||||
help="Add -G options to the gyp command line")
|
||||
parser.add_option("-l", "--list", action="store_true",
|
||||
help="list available tests and exit")
|
||||
parser.add_option("-n", "--no-exec", action="store_true",
|
||||
help="no execute, just print the command line")
|
||||
parser.add_option("--passed", action="store_true",
|
||||
help="report passed tests")
|
||||
parser.add_option("--path", action="append", default=[],
|
||||
help="additional $PATH directory")
|
||||
parser.add_option("-q", "--quiet", action="store_true",
|
||||
help="quiet, don't print test command lines")
|
||||
opts, args = parser.parse_args(argv[1:])
|
||||
|
||||
if opts.chdir:
|
||||
os.chdir(opts.chdir)
|
||||
|
||||
if opts.path:
|
||||
os.environ['PATH'] += ':' + ':'.join(opts.path)
|
||||
|
||||
if not args:
|
||||
if not opts.all:
|
||||
sys.stderr.write('Specify -a to get all tests.\n')
|
||||
return 1
|
||||
args = ['test']
|
||||
|
||||
tests = []
|
||||
for arg in args:
|
||||
if os.path.isdir(arg):
|
||||
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
|
||||
else:
|
||||
tests.append(arg)
|
||||
|
||||
if opts.list:
|
||||
for test in tests:
|
||||
print test
|
||||
sys.exit(0)
|
||||
|
||||
CommandRunner.verbose = not opts.quiet
|
||||
CommandRunner.active = not opts.no_exec
|
||||
cr = CommandRunner()
|
||||
|
||||
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
|
||||
if not opts.quiet:
|
||||
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
|
||||
|
||||
passed = []
|
||||
failed = []
|
||||
no_result = []
|
||||
|
||||
if opts.format:
|
||||
format_list = opts.format.split(',')
|
||||
else:
|
||||
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
|
||||
format_list = {
|
||||
'freebsd7': ['make'],
|
||||
'freebsd8': ['make'],
|
||||
'cygwin': ['msvs'],
|
||||
'win32': ['msvs', 'ninja'],
|
||||
'linux2': ['make', 'ninja'],
|
||||
'linux3': ['make', 'ninja'],
|
||||
'darwin': ['make', 'ninja', 'xcode'],
|
||||
}[sys.platform]
|
||||
|
||||
for format in format_list:
|
||||
os.environ['TESTGYP_FORMAT'] = format
|
||||
if not opts.quiet:
|
||||
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
|
||||
|
||||
gyp_options = []
|
||||
for option in opts.gyp_option:
|
||||
gyp_options += ['-G', option]
|
||||
if gyp_options and not opts.quiet:
|
||||
sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
|
||||
|
||||
for test in tests:
|
||||
status = cr.run([sys.executable, test] + gyp_options,
|
||||
stdout=sys.stdout,
|
||||
stderr=sys.stderr)
|
||||
if status == 2:
|
||||
no_result.append(test)
|
||||
elif status:
|
||||
failed.append(test)
|
||||
else:
|
||||
passed.append(test)
|
||||
|
||||
if not opts.quiet:
|
||||
def report(description, tests):
|
||||
if tests:
|
||||
if len(tests) == 1:
|
||||
sys.stdout.write("\n%s the following test:\n" % description)
|
||||
else:
|
||||
fmt = "\n%s the following %d tests:\n"
|
||||
sys.stdout.write(fmt % (description, len(tests)))
|
||||
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
|
||||
|
||||
if opts.passed:
|
||||
report("Passed", passed)
|
||||
report("Failed", failed)
|
||||
report("No result from", no_result)
|
||||
|
||||
if failed:
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
341
google-breakpad/src/tools/gyp/pylib/gyp/MSVSNew.py
Normal file
@ -0,0 +1,341 @@
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""New implementation of Visual Studio project generation for SCons."""
|
||||
|
||||
import os
|
||||
import random
|
||||
|
||||
import gyp.common
|
||||
|
||||
# hashlib is supplied as of Python 2.5 as the replacement interface for md5
|
||||
# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
|
||||
# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
|
||||
# preserving 2.4 compatibility.
|
||||
try:
|
||||
import hashlib
|
||||
_new_md5 = hashlib.md5
|
||||
except ImportError:
|
||||
import md5
|
||||
_new_md5 = md5.new
|
||||
|
||||
|
||||
# Initialize random number generator
|
||||
random.seed()
|
||||
|
||||
# GUIDs for project types
|
||||
ENTRY_TYPE_GUIDS = {
|
||||
'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
|
||||
'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
|
||||
}
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Helper functions
|
||||
|
||||
|
||||
def MakeGuid(name, seed='msvs_new'):
|
||||
"""Returns a GUID for the specified target name.
|
||||
|
||||
Args:
|
||||
name: Target name.
|
||||
seed: Seed for MD5 hash.
|
||||
Returns:
|
||||
A GUID-like string calculated from the name and seed.
|
||||
|
||||
This generates something which looks like a GUID, but depends only on the
|
||||
name and seed. This means the same name/seed will always generate the same
|
||||
GUID, so that projects and solutions which refer to each other can explicitly
determine the GUID to refer to. It also means that the GUID will
|
||||
not change when the project for a target is rebuilt.
|
||||
"""
|
||||
# Calculate a MD5 signature for the seed and name.
|
||||
d = _new_md5(str(seed) + str(name)).hexdigest().upper()
|
||||
# Convert most of the signature to GUID form (discard the rest)
|
||||
guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
|
||||
+ '-' + d[20:32] + '}')
|
||||
return guid
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
|
||||
class MSVSFolder(object):
|
||||
"""Folder in a Visual Studio project or solution."""
|
||||
|
||||
def __init__(self, path, name = None, entries = None,
|
||||
guid = None, items = None):
|
||||
"""Initializes the folder.
|
||||
|
||||
Args:
|
||||
path: Full path to the folder.
|
||||
name: Name of the folder.
|
||||
entries: List of folder entries to nest inside this folder. May contain
|
||||
Folder or Project objects. May be None, if the folder is empty.
|
||||
guid: GUID to use for folder, if not None.
|
||||
items: List of solution items to include in the folder project. May be
|
||||
None, if the folder does not directly contain items.
|
||||
"""
|
||||
if name:
|
||||
self.name = name
|
||||
else:
|
||||
# Use last layer.
|
||||
self.name = os.path.basename(path)
|
||||
|
||||
self.path = path
|
||||
self.guid = guid
|
||||
|
||||
# Copy passed lists (or set to empty lists)
|
||||
self.entries = list(entries or [])
|
||||
self.items = list(items or [])
|
||||
|
||||
self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']
|
||||
|
||||
def get_guid(self):
|
||||
if self.guid is None:
|
||||
# Use consistent guids for folders (so things don't regenerate).
|
||||
self.guid = MakeGuid(self.path, seed='msvs_folder')
|
||||
return self.guid
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
|
||||
class MSVSProject(object):
|
||||
"""Visual Studio project."""
|
||||
|
||||
def __init__(self, path, name = None, dependencies = None, guid = None,
|
||||
spec = None, build_file = None, config_platform_overrides = None,
|
||||
fixpath_prefix = None):
|
||||
"""Initializes the project.
|
||||
|
||||
Args:
|
||||
path: Absolute path to the project file.
|
||||
name: Name of project. If None, the name will be the same as the base
|
||||
name of the project file.
|
||||
dependencies: List of other Project objects this project is dependent
|
||||
upon, if not None.
|
||||
guid: GUID to use for project, if not None.
|
||||
spec: Dictionary specifying how to build this project.
|
||||
build_file: Filename of the .gyp file that the vcproj file comes from.
|
||||
config_platform_overrides: optional dict of configuration platforms to
|
||||
used in place of the default for this target.
|
||||
fixpath_prefix: the path used to adjust the behavior of _fixpath
|
||||
"""
|
||||
self.path = path
|
||||
self.guid = guid
|
||||
self.spec = spec
|
||||
self.build_file = build_file
|
||||
# Use project filename if name not specified
|
||||
self.name = name or os.path.splitext(os.path.basename(path))[0]
|
||||
|
||||
# Copy passed lists (or set to empty lists)
|
||||
self.dependencies = list(dependencies or [])
|
||||
|
||||
self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
|
||||
|
||||
if config_platform_overrides:
|
||||
self.config_platform_overrides = config_platform_overrides
|
||||
else:
|
||||
self.config_platform_overrides = {}
|
||||
self.fixpath_prefix = fixpath_prefix
|
||||
self.msbuild_toolset = None
|
||||
|
||||
def set_dependencies(self, dependencies):
|
||||
self.dependencies = list(dependencies or [])
|
||||
|
||||
def get_guid(self):
|
||||
if self.guid is None:
|
||||
# Set GUID from path
|
||||
# TODO(rspangler): This is fragile.
|
||||
# 1. We can't just use the project filename sans path, since there could
|
||||
# be multiple projects with the same base name (for example,
|
||||
# foo/unittest.vcproj and bar/unittest.vcproj).
|
||||
# 2. The path needs to be relative to $SOURCE_ROOT, so that the project
|
||||
# GUID is the same whether it's included from base/base.sln or
|
||||
# foo/bar/baz/baz.sln.
|
||||
# 3. The GUID needs to be the same each time this builder is invoked, so
|
||||
# that we don't need to rebuild the solution when the project changes.
|
||||
# 4. We should be able to handle pre-built project files by reading the
|
||||
# GUID from the files.
|
||||
self.guid = MakeGuid(self.name)
|
||||
return self.guid
|
||||
|
||||
def set_msbuild_toolset(self, msbuild_toolset):
|
||||
self.msbuild_toolset = msbuild_toolset
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
|
||||
class MSVSSolution:
|
||||
"""Visual Studio solution."""
|
||||
|
||||
def __init__(self, path, version, entries=None, variants=None,
|
||||
websiteProperties=True):
|
||||
"""Initializes the solution.
|
||||
|
||||
Args:
|
||||
path: Path to solution file.
|
||||
version: Format version to emit.
|
||||
entries: List of entries in solution. May contain Folder or Project
|
||||
objects. May be None, if the folder is empty.
|
||||
variants: List of build variant strings. If none, a default list will
|
||||
be used.
|
||||
websiteProperties: Flag to decide if the website properties section
|
||||
is generated.
|
||||
"""
|
||||
self.path = path
|
||||
self.websiteProperties = websiteProperties
|
||||
self.version = version
|
||||
|
||||
# Copy passed lists (or set to empty lists)
|
||||
self.entries = list(entries or [])
|
||||
|
||||
if variants:
|
||||
# Copy passed list
|
||||
self.variants = variants[:]
|
||||
else:
|
||||
# Use default
|
||||
self.variants = ['Debug|Win32', 'Release|Win32']
|
||||
# TODO(rspangler): Need to be able to handle a mapping of solution config
|
||||
# to project config. Should we be able to handle variants being a dict,
|
||||
# or add a separate variant_map variable? If it's a dict, we can't
|
||||
# guarantee the order of variants since dict keys aren't ordered.
|
||||
|
||||
|
||||
# TODO(rspangler): Automatically write to disk for now; should delay until
|
||||
# node-evaluation time.
|
||||
self.Write()
|
||||
|
||||
|
||||
def Write(self, writer=gyp.common.WriteOnDiff):
|
||||
"""Writes the solution file to disk.
|
||||
|
||||
Raises:
|
||||
IndexError: An entry appears multiple times.
|
||||
"""
|
||||
# Walk the entry tree and collect all the folders and projects.
|
||||
all_entries = set()
|
||||
entries_to_check = self.entries[:]
|
||||
while entries_to_check:
|
||||
e = entries_to_check.pop(0)
|
||||
|
||||
# If this entry has been visited, nothing to do.
|
||||
if e in all_entries:
|
||||
continue
|
||||
|
||||
all_entries.add(e)
|
||||
|
||||
# If this is a folder, check its entries too.
|
||||
if isinstance(e, MSVSFolder):
|
||||
entries_to_check += e.entries
|
||||
|
||||
# Sort by name then guid (so things are in order on vs2008).
|
||||
def NameThenGuid(a, b):
|
||||
if a.name < b.name: return -1
|
||||
if a.name > b.name: return 1
|
||||
if a.get_guid() < b.get_guid(): return -1
|
||||
if a.get_guid() > b.get_guid(): return 1
|
||||
return 0
|
||||
|
||||
all_entries = sorted(all_entries, NameThenGuid)
|
||||
|
||||
# Open file and print header
|
||||
f = writer(self.path)
|
||||
f.write('Microsoft Visual Studio Solution File, '
|
||||
'Format Version %s\r\n' % self.version.SolutionVersion())
|
||||
f.write('# %s\r\n' % self.version.Description())
|
||||
|
||||
# Project entries
|
||||
sln_root = os.path.split(self.path)[0]
|
||||
for e in all_entries:
|
||||
relative_path = gyp.common.RelativePath(e.path, sln_root)
|
||||
# msbuild does not accept an empty folder_name.
|
||||
# use '.' in case relative_path is empty.
|
||||
folder_name = relative_path.replace('/', '\\') or '.'
|
||||
f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
|
||||
e.entry_type_guid, # Entry type GUID
|
||||
e.name, # Folder name
|
||||
folder_name, # Folder name (again)
|
||||
e.get_guid(), # Entry GUID
|
||||
))
|
||||
|
||||
# TODO(rspangler): Need a way to configure this stuff
|
||||
if self.websiteProperties:
|
||||
f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
|
||||
'\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
|
||||
'\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
|
||||
'\tEndProjectSection\r\n')
|
||||
|
||||
if isinstance(e, MSVSFolder):
|
||||
if e.items:
|
||||
f.write('\tProjectSection(SolutionItems) = preProject\r\n')
|
||||
for i in e.items:
|
||||
f.write('\t\t%s = %s\r\n' % (i, i))
|
||||
f.write('\tEndProjectSection\r\n')
|
||||
|
||||
if isinstance(e, MSVSProject):
|
||||
if e.dependencies:
|
||||
f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
|
||||
for d in e.dependencies:
|
||||
f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
|
||||
f.write('\tEndProjectSection\r\n')
|
||||
|
||||
f.write('EndProject\r\n')
|
||||
|
||||
# Global section
|
||||
f.write('Global\r\n')
|
||||
|
||||
# Configurations (variants)
|
||||
f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
|
||||
for v in self.variants:
|
||||
f.write('\t\t%s = %s\r\n' % (v, v))
|
||||
f.write('\tEndGlobalSection\r\n')
|
||||
|
||||
# Sort config guids for easier diffing of solution changes.
|
||||
config_guids = []
|
||||
config_guids_overrides = {}
|
||||
for e in all_entries:
|
||||
if isinstance(e, MSVSProject):
|
||||
config_guids.append(e.get_guid())
|
||||
config_guids_overrides[e.get_guid()] = e.config_platform_overrides
|
||||
config_guids.sort()
|
||||
|
||||
f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
|
||||
for g in config_guids:
|
||||
for v in self.variants:
|
||||
nv = config_guids_overrides[g].get(v, v)
|
||||
# Pick which project configuration to build for this solution
|
||||
# configuration.
|
||||
f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
|
||||
g, # Project GUID
|
||||
v, # Solution build configuration
|
||||
nv, # Project build config for that solution config
|
||||
))
|
||||
|
||||
# Enable project in this solution configuration.
|
||||
f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
|
||||
g, # Project GUID
|
||||
v, # Solution build configuration
|
||||
nv, # Project build config for that solution config
|
||||
))
|
||||
f.write('\tEndGlobalSection\r\n')
|
||||
|
||||
# TODO(rspangler): Should be able to configure this stuff too (though I've
|
||||
# never seen this be any different)
|
||||
f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
|
||||
f.write('\t\tHideSolutionNode = FALSE\r\n')
|
||||
f.write('\tEndGlobalSection\r\n')
|
||||
|
||||
# Folder mappings
|
||||
# TODO(rspangler): Should omit this section if there are no folders
|
||||
f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
|
||||
for e in all_entries:
|
||||
if not isinstance(e, MSVSFolder):
|
||||
continue # Does not apply to projects, only folders
|
||||
for subentry in e.entries:
|
||||
f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
|
||||
f.write('\tEndGlobalSection\r\n')
|
||||
|
||||
f.write('EndGlobal\r\n')
|
||||
|
||||
f.close()
|
208
google-breakpad/src/tools/gyp/pylib/gyp/MSVSProject.py
Normal file
@ -0,0 +1,208 @@
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""Visual Studio project reader/writer."""
|
||||
|
||||
import gyp.common
|
||||
import gyp.easy_xml as easy_xml
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
|
||||
class Tool(object):
|
||||
"""Visual Studio tool."""
|
||||
|
||||
def __init__(self, name, attrs=None):
|
||||
"""Initializes the tool.
|
||||
|
||||
Args:
|
||||
name: Tool name.
|
||||
attrs: Dict of tool attributes; may be None.
|
||||
"""
|
||||
self._attrs = attrs or {}
|
||||
self._attrs['Name'] = name
|
||||
|
||||
def _GetSpecification(self):
|
||||
"""Creates an element for the tool.
|
||||
|
||||
Returns:
|
||||
A new xml.dom.Element for the tool.
|
||||
"""
|
||||
return ['Tool', self._attrs]
|
||||
|
||||
class Filter(object):
|
||||
"""Visual Studio filter - that is, a virtual folder."""
|
||||
|
||||
def __init__(self, name, contents=None):
|
||||
"""Initializes the folder.
|
||||
|
||||
Args:
|
||||
name: Filter (folder) name.
|
||||
contents: List of filenames and/or Filter objects contained.
|
||||
"""
|
||||
self.name = name
|
||||
self.contents = list(contents or [])
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
|
||||
class Writer(object):
|
||||
"""Visual Studio XML project writer."""
|
||||
|
||||
def __init__(self, project_path, version, name, guid=None, platforms=None):
|
||||
"""Initializes the project.
|
||||
|
||||
Args:
|
||||
project_path: Path to the project file.
|
||||
version: Format version to emit.
|
||||
name: Name of the project.
|
||||
guid: GUID to use for project, if not None.
|
||||
platforms: Array of string, the supported platforms. If null, ['Win32'] is used.
|
||||
"""
|
||||
self.project_path = project_path
|
||||
self.version = version
|
||||
self.name = name
|
||||
self.guid = guid
|
||||
|
||||
# Default to Win32 for platforms.
|
||||
if not platforms:
|
||||
platforms = ['Win32']
|
||||
|
||||
# Initialize the specifications of the various sections.
|
||||
self.platform_section = ['Platforms']
|
||||
for platform in platforms:
|
||||
self.platform_section.append(['Platform', {'Name': platform}])
|
||||
self.tool_files_section = ['ToolFiles']
|
||||
self.configurations_section = ['Configurations']
|
||||
self.files_section = ['Files']
|
||||
|
||||
# Keep a dict keyed on filename to speed up access.
|
||||
self.files_dict = dict()
|
||||
|
||||
def AddToolFile(self, path):
|
||||
"""Adds a tool file to the project.
|
||||
|
||||
Args:
|
||||
path: Relative path from project to tool file.
|
||||
"""
|
||||
self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
|
||||
|
||||
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
|
||||
"""Returns the specification for a configuration.
|
||||
|
||||
Args:
|
||||
config_type: Type of configuration node.
|
||||
config_name: Configuration name.
|
||||
attrs: Dict of configuration attributes; may be None.
|
||||
tools: List of tools (strings or Tool objects); may be None.
|
||||
Returns:
|
||||
"""
|
||||
# Handle defaults
|
||||
if not attrs:
|
||||
attrs = {}
|
||||
if not tools:
|
||||
tools = []
|
||||
|
||||
# Add configuration node and its attributes
|
||||
node_attrs = attrs.copy()
|
||||
node_attrs['Name'] = config_name
|
||||
specification = [config_type, node_attrs]
|
||||
|
||||
# Add tool nodes and their attributes
|
||||
if tools:
|
||||
for t in tools:
|
||||
if isinstance(t, Tool):
|
||||
specification.append(t._GetSpecification())
|
||||
else:
|
||||
specification.append(Tool(t)._GetSpecification())
|
||||
return specification
|
||||
|
||||
|
||||
def AddConfig(self, name, attrs=None, tools=None):
|
||||
"""Adds a configuration to the project.
|
||||
|
||||
Args:
|
||||
name: Configuration name.
|
||||
attrs: Dict of configuration attributes; may be None.
|
||||
tools: List of tools (strings or Tool objects); may be None.
|
||||
"""
|
||||
spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
|
||||
self.configurations_section.append(spec)
|
||||
|
||||
def _AddFilesToNode(self, parent, files):
|
||||
"""Adds files and/or filters to the parent node.
|
||||
|
||||
Args:
|
||||
parent: Destination node
|
||||
files: A list of Filter objects and/or relative paths to files.
|
||||
|
||||
Will call itself recursively, if the files list contains Filter objects.
|
||||
"""
|
||||
for f in files:
|
||||
if isinstance(f, Filter):
|
||||
node = ['Filter', {'Name': f.name}]
|
||||
self._AddFilesToNode(node, f.contents)
|
||||
else:
|
||||
node = ['File', {'RelativePath': f}]
|
||||
self.files_dict[f] = node
|
||||
parent.append(node)
|
||||
|
||||
def AddFiles(self, files):
|
||||
"""Adds files to the project.
|
||||
|
||||
Args:
|
||||
files: A list of Filter objects and/or relative paths to files.
|
||||
|
||||
This makes a copy of the file/filter tree at the time of this call. If you
|
||||
later add files to a Filter object which was passed into a previous call
|
||||
to AddFiles(), it will not be reflected in this project.
|
||||
"""
|
||||
self._AddFilesToNode(self.files_section, files)
|
||||
# TODO(rspangler) This also doesn't handle adding files to an existing
|
||||
# filter. That is, it doesn't merge the trees.
|
||||
|
||||
def AddFileConfig(self, path, config, attrs=None, tools=None):
|
||||
"""Adds a configuration to a file.
|
||||
|
||||
Args:
|
||||
path: Relative path to the file.
|
||||
config: Name of configuration to add.
|
||||
attrs: Dict of configuration attributes; may be None.
|
||||
tools: List of tools (strings or Tool objects); may be None.
|
||||
|
||||
Raises:
|
||||
ValueError: Relative path does not match any file added via AddFiles().
|
||||
"""
|
||||
# Find the file node with the right relative path
|
||||
parent = self.files_dict.get(path)
|
||||
if not parent:
|
||||
raise ValueError('AddFileConfig: file "%s" not in project.' % path)
|
||||
|
||||
# Add the config to the file node
|
||||
spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
|
||||
tools)
|
||||
parent.append(spec)
|
||||
|
||||
def WriteIfChanged(self):
|
||||
"""Writes the project file."""
|
||||
# First create XML content definition
|
||||
content = [
|
||||
'VisualStudioProject',
|
||||
{'ProjectType': 'Visual C++',
|
||||
'Version': self.version.ProjectVersion(),
|
||||
'Name': self.name,
|
||||
'ProjectGUID': self.guid,
|
||||
'RootNamespace': self.name,
|
||||
'Keyword': 'Win32Proj'
|
||||
},
|
||||
self.platform_section,
|
||||
self.tool_files_section,
|
||||
self.configurations_section,
|
||||
['References'], # empty section
|
||||
self.files_section,
|
||||
['Globals'] # empty section
|
||||
]
|
||||
easy_xml.WriteXmlIfChanged(content, self.project_path,
|
||||
encoding="Windows-1252")
|
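For orientation only, a minimal sketch of how the Tool, Filter, and Writer classes above are typically driven together; it is not part of this change. The project name, GUID seed, and tool settings are hypothetical, pylib/ is assumed to be on sys.path, and the version object is assumed to come from MSVSVersion (added later in this commit).

import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSVersion as MSVSVersion

version = MSVSVersion._CreateVersion(
    '2008', r'C:\Program Files\Microsoft Visual Studio 9.0')
writer = MSVSProject.Writer('example.vcproj', version, 'example',
                            guid=MSVSNew.MakeGuid('example'))
writer.AddConfig('Debug|Win32',
                 attrs={'ConfigurationType': '1'},
                 tools=[MSVSProject.Tool('VCCLCompilerTool',
                                         {'Optimization': '0'})])
writer.AddFiles(['main.cc'])
writer.AddFileConfig('main.cc', 'Debug|Win32')
writer.WriteIfChanged()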
1046
google-breakpad/src/tools/gyp/pylib/gyp/MSVSSettings.py
Normal file
File diff suppressed because it is too large
1482
google-breakpad/src/tools/gyp/pylib/gyp/MSVSSettings_test.py
Normal file
File diff suppressed because it is too large
58
google-breakpad/src/tools/gyp/pylib/gyp/MSVSToolFile.py
Normal file
@ -0,0 +1,58 @@
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""Visual Studio project reader/writer."""
|
||||
|
||||
import gyp.common
|
||||
import gyp.easy_xml as easy_xml
|
||||
|
||||
|
||||
class Writer(object):
|
||||
"""Visual Studio XML tool file writer."""
|
||||
|
||||
def __init__(self, tool_file_path, name):
|
||||
"""Initializes the tool file.
|
||||
|
||||
Args:
|
||||
tool_file_path: Path to the tool file.
|
||||
name: Name of the tool file.
|
||||
"""
|
||||
self.tool_file_path = tool_file_path
|
||||
self.name = name
|
||||
self.rules_section = ['Rules']
|
||||
|
||||
def AddCustomBuildRule(self, name, cmd, description,
|
||||
additional_dependencies,
|
||||
outputs, extensions):
|
||||
"""Adds a rule to the tool file.
|
||||
|
||||
Args:
|
||||
name: Name of the rule.
|
||||
description: Description of the rule.
|
||||
cmd: Command line of the rule.
|
||||
additional_dependencies: other files which may trigger the rule.
|
||||
outputs: outputs of the rule.
|
||||
extensions: extensions handled by the rule.
|
||||
"""
|
||||
rule = ['CustomBuildRule',
|
||||
{'Name': name,
|
||||
'ExecutionDescription': description,
|
||||
'CommandLine': cmd,
|
||||
'Outputs': ';'.join(outputs),
|
||||
'FileExtensions': ';'.join(extensions),
|
||||
'AdditionalDependencies':
|
||||
';'.join(additional_dependencies)
|
||||
}]
|
||||
self.rules_section.append(rule)
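# Illustration (hypothetical values, not from the gyp sources): a rule that
# compiles .idl inputs into headers could be registered as
#   AddCustomBuildRule(name='midl', cmd='midl.exe "$(InputPath)"',
#                      description='Compiling IDL',
#                      additional_dependencies=[],
#                      outputs=['$(InputName).h'], extensions=['.idl'])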
|
||||
|
||||
def WriteIfChanged(self):
|
||||
"""Writes the tool file."""
|
||||
content = ['VisualStudioToolFile',
|
||||
{'Version': '8.00',
|
||||
'Name': self.name
|
||||
},
|
||||
self.rules_section
|
||||
]
|
||||
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
|
||||
encoding="Windows-1252")
|
147
google-breakpad/src/tools/gyp/pylib/gyp/MSVSUserFile.py
Normal file
@ -0,0 +1,147 @@
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""Visual Studio user preferences file writer."""
|
||||
|
||||
import os
|
||||
import re
|
||||
import socket # for gethostname
|
||||
|
||||
import gyp.common
|
||||
import gyp.easy_xml as easy_xml
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
def _FindCommandInPath(command):
|
||||
"""If there are no slashes in the command given, this function
|
||||
searches the PATH env to find the given command, and converts it
|
||||
to an absolute path. We have to do this because MSVS is looking
|
||||
for an actual file to launch a debugger on, not just a command
|
||||
line. Note that this happens at GYP time, so anything needing to
|
||||
be built needs to have a full path."""
|
||||
if '/' in command or '\\' in command:
|
||||
# If the command already has path elements (either relative or
|
||||
# absolute), then assume it is constructed properly.
|
||||
return command
|
||||
else:
|
||||
# Search through the path list and find an existing file that
|
||||
# we can access.
|
||||
paths = os.environ.get('PATH','').split(os.pathsep)
|
||||
for path in paths:
|
||||
item = os.path.join(path, command)
|
||||
if os.path.isfile(item) and os.access(item, os.X_OK):
|
||||
return item
|
||||
return command
|
||||
|
||||
def _QuoteWin32CommandLineArgs(args):
|
||||
new_args = []
|
||||
for arg in args:
|
||||
# Replace all double-quotes with double-double-quotes to escape
|
||||
# them for cmd shell, and then quote the whole thing if there
|
||||
# are any.
|
||||
if arg.find('"') != -1:
|
||||
arg = '""'.join(arg.split('"'))
|
||||
arg = '"%s"' % arg
|
||||
|
||||
# Otherwise, if there are any spaces, quote the whole arg.
|
||||
elif re.search(r'[ \t\n]', arg):
|
||||
arg = '"%s"' % arg
|
||||
new_args.append(arg)
|
||||
return new_args
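# Illustration (hypothetical values, not from the gyp sources): double quotes
# are doubled and any argument containing quotes or whitespace is wrapped, e.g.
#   _QuoteWin32CommandLineArgs(['run.exe', 'two words', 'say "hi"'])
#   -> ['run.exe', '"two words"', '"say ""hi"""']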
|
||||
|
||||
class Writer(object):
|
"""Visual Studio XML user file writer."""
|
||||
|
||||
def __init__(self, user_file_path, version, name):
|
||||
"""Initializes the user file.
|
||||
|
||||
Args:
|
||||
user_file_path: Path to the user file.
|
||||
version: Version info.
|
||||
name: Name of the user file.
|
||||
"""
|
||||
self.user_file_path = user_file_path
|
||||
self.version = version
|
||||
self.name = name
|
||||
self.configurations = {}
|
||||
|
||||
def AddConfig(self, name):
|
||||
"""Adds a configuration to the project.
|
||||
|
||||
Args:
|
||||
name: Configuration name.
|
||||
"""
|
||||
self.configurations[name] = ['Configuration', {'Name': name}]
|
||||
|
||||
def AddDebugSettings(self, config_name, command, environment = {},
|
||||
working_directory=""):
|
||||
"""Adds a DebugSettings node to the user file for a particular config.
|
||||
|
||||
Args:
|
||||
command: command line to run. First element in the list is the
|
||||
executable. All elements of the command will be quoted if
|
||||
necessary.
|
||||
working_directory: working directory in which to run the command. (optional)
|
||||
"""
|
||||
command = _QuoteWin32CommandLineArgs(command)
|
||||
|
||||
abs_command = _FindCommandInPath(command[0])
|
||||
|
||||
if environment and isinstance(environment, dict):
|
||||
env_list = ['%s="%s"' % (key, val)
|
||||
for (key,val) in environment.iteritems()]
|
||||
environment = ' '.join(env_list)
|
||||
else:
|
||||
environment = ''
|
||||
|
||||
n_cmd = ['DebugSettings',
|
||||
{'Command': abs_command,
|
||||
'WorkingDirectory': working_directory,
|
||||
'CommandArguments': " ".join(command[1:]),
|
||||
'RemoteMachine': socket.gethostname(),
|
||||
'Environment': environment,
|
||||
'EnvironmentMerge': 'true',
|
||||
# Currently these are all "dummy" values that we're just setting
|
||||
# in the default manner that MSVS does it. We could use some of
|
||||
# these to add additional capabilities, I suppose, but they might
|
||||
# not have parity with other platforms then.
|
||||
'Attach': 'false',
|
||||
'DebuggerType': '3', # 'auto' debugger
|
||||
'Remote': '1',
|
||||
'RemoteCommand': '',
|
||||
'HttpUrl': '',
|
||||
'PDBPath': '',
|
||||
'SQLDebugging': '',
|
||||
'DebuggerFlavor': '0',
|
||||
'MPIRunCommand': '',
|
||||
'MPIRunArguments': '',
|
||||
'MPIRunWorkingDirectory': '',
|
||||
'ApplicationCommand': '',
|
||||
'ApplicationArguments': '',
|
||||
'ShimCommand': '',
|
||||
'MPIAcceptMode': '',
|
||||
'MPIAcceptFilter': ''
|
||||
}]
|
||||
|
||||
# Find the config, and add it if it doesn't exist.
|
||||
if config_name not in self.configurations:
|
||||
self.AddConfig(config_name)
|
||||
|
||||
# Add the DebugSettings onto the appropriate config.
|
||||
self.configurations[config_name].append(n_cmd)
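# Illustration (hypothetical values, not from the gyp sources):
#   AddDebugSettings('Debug|Win32',
#                    ['my_tests.exe', '--gtest_filter=Minidump.*'],
#                    environment={'BREAKPAD_DUMP_DIR': r'C:\dumps'},
#                    working_directory='$(TargetDir)')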
|
||||
|
||||
def WriteIfChanged(self):
|
||||
"""Writes the user file."""
|
||||
configs = ['Configurations']
|
||||
for config, spec in sorted(self.configurations.iteritems()):
|
||||
configs.append(spec)
|
||||
|
||||
content = ['VisualStudioUserFile',
|
||||
{'Version': self.version.ProjectVersion(),
|
||||
'Name': self.name
|
||||
},
|
||||
configs]
|
||||
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
|
||||
encoding="Windows-1252")
|
325
google-breakpad/src/tools/gyp/pylib/gyp/MSVSVersion.py
Normal file
@ -0,0 +1,325 @@
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
"""Handle version information related to Visual Studio."""
|
||||
|
||||
import errno
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
class VisualStudioVersion(object):
|
||||
"""Information regarding a version of Visual Studio."""
|
||||
|
||||
def __init__(self, short_name, description,
|
||||
solution_version, project_version, flat_sln, uses_vcxproj,
|
||||
path, sdk_based):
|
||||
self.short_name = short_name
|
||||
self.description = description
|
||||
self.solution_version = solution_version
|
||||
self.project_version = project_version
|
||||
self.flat_sln = flat_sln
|
||||
self.uses_vcxproj = uses_vcxproj
|
||||
self.path = path
|
||||
self.sdk_based = sdk_based
|
||||
|
||||
def ShortName(self):
|
||||
return self.short_name
|
||||
|
||||
def Description(self):
|
||||
"""Get the full description of the version."""
|
||||
return self.description
|
||||
|
||||
def SolutionVersion(self):
|
||||
"""Get the version number of the sln files."""
|
||||
return self.solution_version
|
||||
|
||||
def ProjectVersion(self):
|
||||
"""Get the version number of the vcproj or vcxproj files."""
|
||||
return self.project_version
|
||||
|
||||
def FlatSolution(self):
|
||||
return self.flat_sln
|
||||
|
||||
def UsesVcxproj(self):
|
||||
"""Returns true if this version uses a vcxproj file."""
|
||||
return self.uses_vcxproj
|
||||
|
||||
def ProjectExtension(self):
|
||||
"""Returns the file extension for the project."""
|
||||
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
|
||||
|
||||
def Path(self):
|
||||
"""Returns the path to Visual Studio installation."""
|
||||
return self.path
|
||||
|
||||
def ToolPath(self, tool):
|
||||
"""Returns the path to a given compiler tool. """
|
||||
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
|
||||
|
||||
def SetupScript(self, target_arch):
|
||||
"""Returns a command (with arguments) to be used to set up the
|
||||
environment."""
|
||||
# Check if we are running in the SDK command line environment and use
|
||||
# the setup script from the SDK if so. |target_arch| should be either
|
||||
# 'x86' or 'x64'.
|
||||
assert target_arch in ('x86', 'x64')
|
||||
sdk_dir = os.environ.get('WindowsSDKDir')
|
||||
if self.sdk_based and sdk_dir:
|
||||
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
|
||||
'/' + target_arch]
|
||||
else:
|
||||
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
|
||||
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
|
||||
# isn't always.
|
||||
if target_arch == 'x86':
|
||||
return [os.path.normpath(
|
||||
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
|
||||
else:
|
||||
assert target_arch == 'x64'
|
||||
arg = 'x86_amd64'
|
||||
if (os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
|
||||
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
|
||||
# Use the 64-on-64 compiler if we can.
|
||||
arg = 'amd64'
|
||||
return [os.path.normpath(
|
||||
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
|
||||
|
||||
|
||||
def _RegistryQueryBase(sysdir, key, value):
|
||||
"""Use reg.exe to read a particular key.
|
||||
|
||||
While ideally we might use the win32 module, we would like gyp to be
|
||||
python neutral, so for instance cygwin python lacks this module.
|
||||
|
||||
Arguments:
|
||||
sysdir: The system subdirectory to attempt to launch reg.exe from.
|
||||
key: The registry key to read from.
|
||||
value: The particular value to read.
|
||||
Return:
|
||||
stdout from reg.exe, or None for failure.
|
||||
"""
|
||||
# Skip if not on Windows or Python Win32 setup issue
|
||||
if sys.platform not in ('win32', 'cygwin'):
|
||||
return None
|
||||
# Setup params to pass to and attempt to launch reg.exe
|
||||
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
|
||||
'query', key]
|
||||
if value:
|
||||
cmd.extend(['/v', value])
|
||||
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
|
||||
# Note that the error text may be in [1] in some cases
|
||||
text = p.communicate()[0]
|
||||
# Check return code from reg.exe; officially 0==success and 1==error
|
||||
if p.returncode:
|
||||
return None
|
||||
return text
|
||||
|
||||
|
||||
def _RegistryQuery(key, value=None):
|
||||
"""Use reg.exe to read a particular key through _RegistryQueryBase.
|
||||
|
||||
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
|
||||
that fails, it falls back to System32. Sysnative is available on Vista and
|
||||
up and available on Windows Server 2003 and XP through KB patch 942589. Note
|
||||
that Sysnative will always fail if using 64-bit python due to it being a
|
||||
virtual directory and System32 will work correctly in the first place.
|
||||
|
||||
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
|
||||
|
||||
Arguments:
|
||||
key: The registry key.
|
||||
value: The particular registry value to read (optional).
|
||||
Return:
|
||||
stdout from reg.exe, or None for failure.
|
||||
"""
|
||||
text = None
|
||||
try:
|
||||
text = _RegistryQueryBase('Sysnative', key, value)
|
||||
except OSError, e:
|
||||
if e.errno == errno.ENOENT:
|
||||
text = _RegistryQueryBase('System32', key, value)
|
||||
else:
|
||||
raise
|
||||
return text
|
||||
|
||||
|
||||
def _RegistryGetValue(key, value):
|
||||
"""Use reg.exe to obtain the value of a registry key.
|
||||
|
||||
Args:
|
||||
key: The registry key.
|
||||
value: The particular registry value to read.
|
||||
Return:
|
||||
contents of the registry key's value, or None on failure.
|
||||
"""
|
||||
text = _RegistryQuery(key, value)
|
||||
if not text:
|
||||
return None
|
||||
# Extract value.
|
||||
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
|
||||
if not match:
|
||||
return None
|
||||
return match.group(1)
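# Illustration (hypothetical value, not from the gyp sources): reg.exe prints
# lines such as
#     InstallDir    REG_SZ    C:\Program Files\Microsoft Visual Studio 9.0\Common7\IDE\
# and the regex above captures everything after the REG_* type token, so that
# installation path is what gets returned.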
|
||||
|
||||
|
||||
def _RegistryKeyExists(key):
|
||||
"""Use reg.exe to see if a key exists.
|
||||
|
||||
Args:
|
||||
key: The registry key to check.
|
||||
Return:
|
||||
True if the key exists
|
||||
"""
|
||||
if not _RegistryQuery(key):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def _CreateVersion(name, path, sdk_based=False):
|
||||
"""Sets up MSVS project generation.
|
||||
|
||||
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
|
||||
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
|
||||
passed in that doesn't match a value in versions, Python will throw an error.
|
||||
"""
|
||||
versions = {
|
||||
'2010': VisualStudioVersion('2010',
|
||||
'Visual Studio 2010',
|
||||
solution_version='11.00',
|
||||
project_version='4.0',
|
||||
flat_sln=False,
|
||||
uses_vcxproj=True,
|
||||
path=path,
|
||||
sdk_based=sdk_based),
|
||||
'2010e': VisualStudioVersion('2010e',
|
||||
'Visual Studio 2010',
|
||||
solution_version='11.00',
|
||||
project_version='4.0',
|
||||
flat_sln=True,
|
||||
uses_vcxproj=True,
|
||||
path=path,
|
||||
sdk_based=sdk_based),
|
||||
'2008': VisualStudioVersion('2008',
|
||||
'Visual Studio 2008',
|
||||
solution_version='10.00',
|
||||
project_version='9.00',
|
||||
flat_sln=False,
|
||||
uses_vcxproj=False,
|
||||
path=path,
|
||||
sdk_based=sdk_based),
|
||||
'2008e': VisualStudioVersion('2008e',
|
||||
'Visual Studio 2008',
|
||||
solution_version='10.00',
|
||||
project_version='9.00',
|
||||
flat_sln=True,
|
||||
uses_vcxproj=False,
|
||||
path=path,
|
||||
sdk_based=sdk_based),
|
||||
'2005': VisualStudioVersion('2005',
|
||||
'Visual Studio 2005',
|
||||
solution_version='9.00',
|
||||
project_version='8.00',
|
||||
flat_sln=False,
|
||||
uses_vcxproj=False,
|
||||
path=path,
|
||||
sdk_based=sdk_based),
|
||||
'2005e': VisualStudioVersion('2005e',
|
||||
'Visual Studio 2005',
|
||||
solution_version='9.00',
|
||||
project_version='8.00',
|
||||
flat_sln=True,
|
||||
uses_vcxproj=False,
|
||||
path=path,
|
||||
sdk_based=sdk_based),
|
||||
}
|
||||
return versions[str(name)]
|
||||
|
||||
|
||||
def _DetectVisualStudioVersions(versions_to_check, force_express):
|
||||
"""Collect the list of installed visual studio versions.
|
||||
|
||||
Returns:
|
||||
A list of visual studio versions installed in descending order of
|
||||
usage preference.
|
||||
Base this on the registry and a quick check if devenv.exe exists.
|
||||
Only versions 8-10 are considered.
|
||||
Possibilities are:
|
||||
2005(e) - Visual Studio 2005 (8)
|
||||
2008(e) - Visual Studio 2008 (9)
|
||||
2010(e) - Visual Studio 2010 (10)
|
||||
Where (e) is e for express editions of MSVS and blank otherwise.
|
||||
"""
|
||||
version_to_year = {'8.0': '2005', '9.0': '2008', '10.0': '2010'}
|
||||
versions = []
|
||||
for version in versions_to_check:
|
||||
# Old method of searching for which VS version is installed
|
||||
# We don't use the 2010-encouraged-way because we also want to get the
|
||||
# path to the binaries, which it doesn't offer.
|
||||
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
|
||||
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
|
||||
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
|
||||
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
|
||||
for index in range(len(keys)):
|
||||
path = _RegistryGetValue(keys[index], 'InstallDir')
|
||||
if not path:
|
||||
continue
|
||||
# Check for full.
|
||||
full_path = os.path.join(path, 'devenv.exe')
|
||||
express_path = os.path.join(path, 'vcexpress.exe')
|
||||
if not force_express and os.path.exists(full_path):
|
||||
# Add this one.
|
||||
versions.append(_CreateVersion(version_to_year[version],
|
||||
os.path.join(path, '..', '..')))
|
||||
# Check for express.
|
||||
elif os.path.exists(express_path):
|
||||
# Add this one.
|
||||
versions.append(_CreateVersion(version_to_year[version] + 'e',
|
||||
os.path.join(path, '..', '..')))
|
||||
|
||||
# The old method above does not work when only SDK is installed.
|
||||
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
|
||||
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
|
||||
for index in range(len(keys)):
|
||||
path = _RegistryGetValue(keys[index], version)
|
||||
if not path:
|
||||
continue
|
||||
versions.append(_CreateVersion(version_to_year[version] + 'e',
|
||||
os.path.join(path, '..'), sdk_based=True))
|
||||
|
||||
return versions
|
||||
|
||||
|
||||
def SelectVisualStudioVersion(version='auto'):
|
||||
"""Select which version of Visual Studio projects to generate.
|
||||
|
||||
Arguments:
|
||||
version: Hook to allow caller to force a particular version (vs auto).
|
||||
Returns:
|
||||
An object representing a visual studio project format version.
|
||||
"""
|
||||
# In auto mode, check environment variable for override.
|
||||
if version == 'auto':
|
||||
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
|
||||
version_map = {
|
||||
'auto': ('10.0', '9.0', '8.0'),
|
||||
'2005': ('8.0',),
|
||||
'2005e': ('8.0',),
|
||||
'2008': ('9.0',),
|
||||
'2008e': ('9.0',),
|
||||
'2010': ('10.0',),
|
||||
'2010e': ('10.0',),
|
||||
}
|
||||
version = str(version)
|
||||
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
|
||||
if not versions:
|
||||
if version == 'auto':
|
||||
# Default to 2005 if we couldn't find anything
|
||||
return _CreateVersion('2005', None)
|
||||
else:
|
||||
return _CreateVersion(version, None)
|
||||
return versions[0]
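# Editor's sketch; illustrative only, not part of the gyp source. The name
# SetupScript() and its target_arch argument are assumed from the vcvarsall
# handling at the top of this file.
def _ExampleSelectAndSetup():
  """Shows how a caller might pick a Visual Studio version and ask it for the
  environment-setup command (the vcvarsall.bat invocation assembled above)
  for an x64 target. The concrete path and argument depend on what the
  registry detection finds on the machine."""
  vs = SelectVisualStudioVersion('auto')
  # For a full (non-Express) 64-bit-capable install this typically yields
  # something like ['<VS install dir>\\VC\\vcvarsall.bat', 'amd64'].
  return vs.SetupScript('x64')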
|
199
google-breakpad/src/tools/gyp/pylib/gyp/SCons.py
Normal file
@ -0,0 +1,199 @@
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""
|
||||
SCons generator.
|
||||
|
||||
This contains class definitions and supporting functions for generating
|
||||
pieces of SCons files for the different types of GYP targets.
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
|
||||
def WriteList(fp, list, prefix='',
|
||||
separator=',\n ',
|
||||
preamble=None,
|
||||
postamble=None):
|
||||
fp.write(preamble or '')
|
||||
fp.write((separator or ' ').join([prefix + l for l in list]))
|
||||
fp.write(postamble or '')
|
||||
|
||||
|
||||
class TargetBase(object):
|
||||
"""
|
||||
Base class for a SCons representation of a GYP target.
|
||||
"""
|
||||
is_ignored = False
|
||||
target_prefix = ''
|
||||
target_suffix = ''
|
||||
def __init__(self, spec):
|
||||
self.spec = spec
|
||||
def full_product_name(self):
|
||||
"""
|
||||
Returns the full name of the product being built:
|
||||
|
||||
* Uses 'product_name' if it's set, else prefix + 'target_name'.
|
||||
* Prepends 'product_dir' if set.
|
||||
* Appends SCons suffix variables for the target type (or
|
||||
product_extension).
|
||||
"""
|
||||
suffix = self.target_suffix
|
||||
product_extension = self.spec.get('product_extension')
|
||||
if product_extension:
|
||||
suffix = '.' + product_extension
|
||||
prefix = self.spec.get('product_prefix', self.target_prefix)
|
||||
name = self.spec['target_name']
|
||||
name = prefix + self.spec.get('product_name', name) + suffix
|
||||
product_dir = self.spec.get('product_dir')
|
||||
if product_dir:
|
||||
name = os.path.join(product_dir, name)
|
||||
else:
|
||||
name = os.path.join(self.out_dir, name)
|
||||
return name
|
||||
|
||||
def write_input_files(self, fp):
|
||||
"""
|
||||
Writes the definition of the input files (sources).
|
||||
"""
|
||||
sources = self.spec.get('sources')
|
||||
if not sources:
|
||||
fp.write('\ninput_files = []\n')
|
||||
return
|
||||
preamble = '\ninput_files = [\n '
|
||||
postamble = ',\n]\n'
|
||||
WriteList(fp, map(repr, sources), preamble=preamble, postamble=postamble)
|
||||
|
||||
def builder_call(self):
|
||||
"""
|
||||
Returns the actual SCons builder call to build this target.
|
||||
"""
|
||||
name = self.full_product_name()
|
||||
return 'env.%s(env.File(%r), input_files)' % (self.builder_name, name)
|
||||
def write_target(self, fp, src_dir='', pre=''):
|
||||
"""
|
||||
Writes the lines necessary to build this target.
|
||||
"""
|
||||
fp.write('\n' + pre)
|
||||
fp.write('_outputs = %s\n' % self.builder_call())
|
||||
fp.write('target_files.extend(_outputs)\n')
|
||||
|
||||
|
||||
class NoneTarget(TargetBase):
|
||||
"""
|
||||
A GYP target type of 'none', implicitly or explicitly.
|
||||
"""
|
||||
def write_target(self, fp, src_dir='', pre=''):
|
||||
fp.write('\ntarget_files.extend(input_files)\n')
|
||||
|
||||
|
||||
class SettingsTarget(TargetBase):
|
||||
"""
|
||||
A GYP target type of 'settings'.
|
||||
"""
|
||||
is_ignored = True
|
||||
|
||||
|
||||
compilable_sources_template = """
|
||||
_result = []
|
||||
for infile in input_files:
|
||||
if env.compilable(infile):
|
||||
if (type(infile) == type('')
|
||||
and (infile.startswith(%(src_dir)r)
|
||||
or not os.path.isabs(env.subst(infile)))):
|
||||
# Force files below the build directory by replacing all '..'
|
||||
# elements in the path with '__':
|
||||
base, ext = os.path.splitext(os.path.normpath(infile))
|
||||
base = [d == '..' and '__' or d for d in base.split('/')]
|
||||
base = os.path.join(*base)
|
||||
object = '${OBJ_DIR}/${COMPONENT_NAME}/${TARGET_NAME}/' + base
|
||||
if not infile.startswith(%(src_dir)r):
|
||||
infile = %(src_dir)r + infile
|
||||
infile = env.%(name)s(object, infile)[0]
|
||||
else:
|
||||
infile = env.%(name)s(infile)[0]
|
||||
_result.append(infile)
|
||||
input_files = _result
|
||||
"""
|
||||
|
||||
class CompilableSourcesTargetBase(TargetBase):
|
||||
"""
|
||||
An abstract base class for targets that compile their source files.
|
||||
|
||||
We explicitly transform compilable files into object files,
|
||||
even though SCons could infer that for us, because we want
|
||||
to control where the object file ends up. (The implicit rules
|
||||
in SCons always put the object file next to the source file.)
|
||||
"""
|
||||
intermediate_builder_name = None
|
||||
def write_target(self, fp, src_dir='', pre=''):
|
||||
if self.intermediate_builder_name is None:
|
||||
raise NotImplementedError
|
||||
if src_dir and not src_dir.endswith('/'):
|
||||
src_dir += '/'
|
||||
variables = {
|
||||
'src_dir': src_dir,
|
||||
'name': self.intermediate_builder_name,
|
||||
}
|
||||
fp.write(compilable_sources_template % variables)
|
||||
super(CompilableSourcesTargetBase, self).write_target(fp)
|
||||
|
||||
|
||||
class ProgramTarget(CompilableSourcesTargetBase):
|
||||
"""
|
||||
A GYP target type of 'executable'.
|
||||
"""
|
||||
builder_name = 'GypProgram'
|
||||
intermediate_builder_name = 'StaticObject'
|
||||
target_prefix = '${PROGPREFIX}'
|
||||
target_suffix = '${PROGSUFFIX}'
|
||||
out_dir = '${TOP_BUILDDIR}'
|
||||
|
||||
|
||||
class StaticLibraryTarget(CompilableSourcesTargetBase):
|
||||
"""
|
||||
A GYP target type of 'static_library'.
|
||||
"""
|
||||
builder_name = 'GypStaticLibrary'
|
||||
intermediate_builder_name = 'StaticObject'
|
||||
target_prefix = '${LIBPREFIX}'
|
||||
target_suffix = '${LIBSUFFIX}'
|
||||
out_dir = '${LIB_DIR}'
|
||||
|
||||
|
||||
class SharedLibraryTarget(CompilableSourcesTargetBase):
|
||||
"""
|
||||
A GYP target type of 'shared_library'.
|
||||
"""
|
||||
builder_name = 'GypSharedLibrary'
|
||||
intermediate_builder_name = 'SharedObject'
|
||||
target_prefix = '${SHLIBPREFIX}'
|
||||
target_suffix = '${SHLIBSUFFIX}'
|
||||
out_dir = '${LIB_DIR}'
|
||||
|
||||
|
||||
class LoadableModuleTarget(CompilableSourcesTargetBase):
|
||||
"""
|
||||
A GYP target type of 'loadable_module'.
|
||||
"""
|
||||
builder_name = 'GypLoadableModule'
|
||||
intermediate_builder_name = 'SharedObject'
|
||||
target_prefix = '${SHLIBPREFIX}'
|
||||
target_suffix = '${SHLIBSUFFIX}'
|
||||
out_dir = '${TOP_BUILDDIR}'
|
||||
|
||||
|
||||
TargetMap = {
|
||||
None : NoneTarget,
|
||||
'none' : NoneTarget,
|
||||
'settings' : SettingsTarget,
|
||||
'executable' : ProgramTarget,
|
||||
'static_library' : StaticLibraryTarget,
|
||||
'shared_library' : SharedLibraryTarget,
|
||||
'loadable_module' : LoadableModuleTarget,
|
||||
}
|
||||
|
||||
|
||||
def Target(spec):
|
||||
return TargetMap[spec.get('type')](spec)
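# Editor's sketch; illustrative only, not part of the gyp source.
def _ExampleWriteTarget(fp):
  """Runs a minimal 'executable' spec through Target() and writes the
  resulting SCons fragment (input file list plus builder call) to |fp|."""
  spec = {'type': 'executable', 'target_name': 'demo', 'sources': ['demo.c']}
  t = Target(spec)                     # resolves to ProgramTarget
  t.write_input_files(fp)              # emits the input_files = [...] block
  t.write_target(fp, src_dir='src/')   # emits the GypProgram builder call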
|
517
google-breakpad/src/tools/gyp/pylib/gyp/__init__.py
Normal file
@ -0,0 +1,517 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
import copy
|
||||
import gyp.input
|
||||
import optparse
|
||||
import os.path
|
||||
import re
|
||||
import shlex
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
# Default debug modes for GYP
|
||||
debug = {}
|
||||
|
||||
# List of "official" debug modes, but you can use anything you like.
|
||||
DEBUG_GENERAL = 'general'
|
||||
DEBUG_VARIABLES = 'variables'
|
||||
DEBUG_INCLUDES = 'includes'
|
||||
|
||||
|
||||
def DebugOutput(mode, message):
|
||||
if 'all' in gyp.debug.keys() or mode in gyp.debug.keys():
|
||||
ctx = ('unknown', 0, 'unknown')
|
||||
try:
|
||||
f = traceback.extract_stack(limit=2)
|
||||
if f:
|
||||
ctx = f[0][:3]
|
||||
except:
|
||||
pass
|
||||
print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
|
||||
ctx[1], ctx[2], message)
|
||||
|
||||
def FindBuildFiles():
|
||||
extension = '.gyp'
|
||||
files = os.listdir(os.getcwd())
|
||||
build_files = []
|
||||
for file in files:
|
||||
if file.endswith(extension):
|
||||
build_files.append(file)
|
||||
return build_files
|
||||
|
||||
|
||||
class GypError(Exception):
|
||||
"""Error class representing an error, which is to be presented
|
||||
to the user. The main entry point will catch and display this.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
def Load(build_files, format, default_variables={},
|
||||
includes=[], depth='.', params=None, check=False, circular_check=True):
|
||||
"""
|
||||
Loads one or more specified build files.
|
||||
default_variables and includes will be copied before use.
|
||||
Returns the generator for the specified format and the
|
||||
data returned by loading the specified build files.
|
||||
"""
|
||||
if params is None:
|
||||
params = {}
|
||||
|
||||
flavor = None
|
||||
if '-' in format:
|
||||
format, params['flavor'] = format.split('-', 1)
|
||||
|
||||
default_variables = copy.copy(default_variables)
|
||||
|
||||
# Default variables provided by this program and its modules should be
|
||||
# named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
|
||||
# avoiding collisions with user and automatic variables.
|
||||
default_variables['GENERATOR'] = format
|
||||
|
||||
# Format can be a custom python file, or by default the name of a module
|
||||
# within gyp.generator.
|
||||
if format.endswith('.py'):
|
||||
generator_name = os.path.splitext(format)[0]
|
||||
path, generator_name = os.path.split(generator_name)
|
||||
|
||||
# Make sure the path to the custom generator is in sys.path
|
||||
# Don't worry about removing it once we are done. Keeping the path
|
||||
# to each generator that is used in sys.path is likely harmless and
|
||||
# arguably a good idea.
|
||||
path = os.path.abspath(path)
|
||||
if path not in sys.path:
|
||||
sys.path.insert(0, path)
|
||||
else:
|
||||
generator_name = 'gyp.generator.' + format
|
||||
|
||||
# These parameters are passed in order (as opposed to by key)
|
||||
# because ActivePython cannot handle key parameters to __import__.
|
||||
generator = __import__(generator_name, globals(), locals(), generator_name)
|
||||
for (key, val) in generator.generator_default_variables.items():
|
||||
default_variables.setdefault(key, val)
|
||||
|
||||
# Give the generator the opportunity to set additional variables based on
|
||||
# the params it will receive in the output phase.
|
||||
if getattr(generator, 'CalculateVariables', None):
|
||||
generator.CalculateVariables(default_variables, params)
|
||||
|
||||
# Give the generator the opportunity to set generator_input_info based on
|
||||
# the params it will receive in the output phase.
|
||||
if getattr(generator, 'CalculateGeneratorInputInfo', None):
|
||||
generator.CalculateGeneratorInputInfo(params)
|
||||
|
||||
# Fetch the generator specific info that gets fed to input, we use getattr
|
||||
# so we can default things and the generators only have to provide what
|
||||
# they need.
|
||||
generator_input_info = {
|
||||
'generator_wants_absolute_build_file_paths':
|
||||
getattr(generator, 'generator_wants_absolute_build_file_paths', False),
|
||||
'generator_handles_variants':
|
||||
getattr(generator, 'generator_handles_variants', False),
|
||||
'non_configuration_keys':
|
||||
getattr(generator, 'generator_additional_non_configuration_keys', []),
|
||||
'path_sections':
|
||||
getattr(generator, 'generator_additional_path_sections', []),
|
||||
'extra_sources_for_rules':
|
||||
getattr(generator, 'generator_extra_sources_for_rules', []),
|
||||
'generator_supports_multiple_toolsets':
|
||||
getattr(generator, 'generator_supports_multiple_toolsets', False),
|
||||
'generator_wants_static_library_dependencies_adjusted':
|
||||
getattr(generator,
|
||||
'generator_wants_static_library_dependencies_adjusted', True),
|
||||
'generator_wants_sorted_dependencies':
|
||||
getattr(generator, 'generator_wants_sorted_dependencies', False),
|
||||
}
|
||||
|
||||
# Process the input specific to this generator.
|
||||
result = gyp.input.Load(build_files, default_variables, includes[:],
|
||||
depth, generator_input_info, check, circular_check)
|
||||
return [generator] + result
|
||||
|
||||
def NameValueListToDict(name_value_list):
|
||||
"""
|
||||
Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
|
||||
of the pairs. If a string is simply NAME, then the value in the dictionary
|
||||
is set to True. If VALUE can be converted to an integer, it is.
|
||||
"""
|
||||
result = { }
|
||||
for item in name_value_list:
|
||||
tokens = item.split('=', 1)
|
||||
if len(tokens) == 2:
|
||||
# If we can make it an int, use that, otherwise, use the string.
|
||||
try:
|
||||
token_value = int(tokens[1])
|
||||
except ValueError:
|
||||
token_value = tokens[1]
|
||||
# Set the variable to the supplied value.
|
||||
result[tokens[0]] = token_value
|
||||
else:
|
||||
# No value supplied, treat it as a boolean and set it.
|
||||
result[tokens[0]] = True
|
||||
return result
|
||||
|
||||
def ShlexEnv(env_name):
|
||||
flags = os.environ.get(env_name, [])
|
||||
if flags:
|
||||
flags = shlex.split(flags)
|
||||
return flags
|
||||
|
||||
def FormatOpt(opt, value):
|
||||
if opt.startswith('--'):
|
||||
return '%s=%s' % (opt, value)
|
||||
return opt + value
|
||||
|
||||
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
|
||||
"""Regenerate a list of command line flags, for an option of action='append'.
|
||||
|
||||
The |env_name|, if given, is checked in the environment and used to generate
|
||||
an initial list of options, then the options that were specified on the
|
||||
command line (given in |values|) are appended. This matches the handling of
|
||||
environment variables and command line flags where command line flags override
|
||||
the environment, while not requiring the environment to be set when the flags
|
||||
are used again.
|
||||
"""
|
||||
flags = []
|
||||
if options.use_environment and env_name:
|
||||
for flag_value in ShlexEnv(env_name):
|
||||
value = FormatOpt(flag, predicate(flag_value))
|
||||
if value in flags:
|
||||
flags.remove(value)
|
||||
flags.append(value)
|
||||
if values:
|
||||
for flag_value in values:
|
||||
flags.append(FormatOpt(flag, predicate(flag_value)))
|
||||
return flags
|
||||
|
||||
def RegenerateFlags(options):
|
||||
"""Given a parsed options object, and taking the environment variables into
|
||||
account, returns a list of flags that should regenerate an equivalent options
|
||||
object (even in the absence of the environment variables.)
|
||||
|
||||
Any path options will be normalized relative to depth.
|
||||
|
||||
The format flag is not included, as it is assumed the calling generator will
|
||||
set that as appropriate.
|
||||
"""
|
||||
def FixPath(path):
|
||||
path = gyp.common.FixIfRelativePath(path, options.depth)
|
||||
if not path:
|
||||
return os.path.curdir
|
||||
return path
|
||||
|
||||
def Noop(value):
|
||||
return value
|
||||
|
||||
# We always want to ignore the environment when regenerating, to avoid
|
||||
# duplicate or changed flags in the environment at the time of regeneration.
|
||||
flags = ['--ignore-environment']
|
||||
for name, metadata in options._regeneration_metadata.iteritems():
|
||||
opt = metadata['opt']
|
||||
value = getattr(options, name)
|
||||
value_predicate = metadata['type'] == 'path' and FixPath or Noop
|
||||
action = metadata['action']
|
||||
env_name = metadata['env_name']
|
||||
if action == 'append':
|
||||
flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
|
||||
env_name, options))
|
||||
elif action in ('store', None): # None is a synonym for 'store'.
|
||||
if value:
|
||||
flags.append(FormatOpt(opt, value_predicate(value)))
|
||||
elif options.use_environment and env_name and os.environ.get(env_name):
|
||||
flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
|
||||
elif action in ('store_true', 'store_false'):
|
||||
if ((action == 'store_true' and value) or
|
||||
(action == 'store_false' and not value)):
|
||||
flags.append(opt)
|
||||
elif options.use_environment and env_name:
|
||||
print >>sys.stderr, ('Warning: environment regeneration unimplemented '
|
||||
'for %s flag %r env_name %r' % (action, opt,
|
||||
env_name))
|
||||
else:
|
||||
print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
|
||||
'flag %r' % (action, opt))
|
||||
|
||||
return flags
|
||||
|
||||
class RegeneratableOptionParser(optparse.OptionParser):
|
||||
def __init__(self):
|
||||
self.__regeneratable_options = {}
|
||||
optparse.OptionParser.__init__(self)
|
||||
|
||||
def add_option(self, *args, **kw):
|
||||
"""Add an option to the parser.
|
||||
|
||||
This accepts the same arguments as OptionParser.add_option, plus the
|
||||
following:
|
||||
regenerate: can be set to False to prevent this option from being included
|
||||
in regeneration.
|
||||
env_name: name of environment variable that additional values for this
|
||||
option come from.
|
||||
type: adds type='path', to tell the regenerator that the values of
|
||||
this option need to be made relative to options.depth
|
||||
"""
|
||||
env_name = kw.pop('env_name', None)
|
||||
if 'dest' in kw and kw.pop('regenerate', True):
|
||||
dest = kw['dest']
|
||||
|
||||
# The path type is needed for regenerating, for optparse we can just treat
|
||||
# it as a string.
|
||||
type = kw.get('type')
|
||||
if type == 'path':
|
||||
kw['type'] = 'string'
|
||||
|
||||
self.__regeneratable_options[dest] = {
|
||||
'action': kw.get('action'),
|
||||
'type': type,
|
||||
'env_name': env_name,
|
||||
'opt': args[0],
|
||||
}
|
||||
|
||||
optparse.OptionParser.add_option(self, *args, **kw)
|
||||
|
||||
def parse_args(self, *args):
|
||||
values, args = optparse.OptionParser.parse_args(self, *args)
|
||||
values._regeneration_metadata = self.__regeneratable_options
|
||||
return values, args
|
||||
|
||||
def gyp_main(args):
|
||||
my_name = os.path.basename(sys.argv[0])
|
||||
|
||||
parser = RegeneratableOptionParser()
|
||||
usage = 'usage: %s [options ...] [build_file ...]'
|
||||
parser.set_usage(usage.replace('%s', '%prog'))
|
||||
parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
|
||||
env_name='GYP_DEFINES',
|
||||
help='sets variable VAR to value VAL')
|
||||
parser.add_option('-f', '--format', dest='formats', action='append',
|
||||
env_name='GYP_GENERATORS', regenerate=False,
|
||||
help='output formats to generate')
|
||||
parser.add_option('--msvs-version', dest='msvs_version',
|
||||
regenerate=False,
|
||||
help='Deprecated; use -G msvs_version=MSVS_VERSION instead')
|
||||
parser.add_option('-I', '--include', dest='includes', action='append',
|
||||
metavar='INCLUDE', type='path',
|
||||
help='files to include in all loaded .gyp files')
|
||||
parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
|
||||
help='set DEPTH gyp variable to a relative path to PATH')
|
||||
parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
|
||||
action='append', default=[], help='turn on a debugging '
|
||||
'mode for debugging GYP. Supported modes are "variables", '
|
||||
'"includes" and "general" or "all" for all of them.')
|
||||
parser.add_option('-S', '--suffix', dest='suffix', default='',
|
||||
help='suffix to add to generated files')
|
||||
parser.add_option('-G', dest='generator_flags', action='append', default=[],
|
||||
metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
|
||||
help='sets generator flag FLAG to VAL')
|
||||
parser.add_option('--generator-output', dest='generator_output',
|
||||
action='store', default=None, metavar='DIR', type='path',
|
||||
env_name='GYP_GENERATOR_OUTPUT',
|
||||
help='puts generated build files under DIR')
|
||||
parser.add_option('--ignore-environment', dest='use_environment',
|
||||
action='store_false', default=True, regenerate=False,
|
||||
help='do not read options from environment variables')
|
||||
parser.add_option('--check', dest='check', action='store_true',
|
||||
help='check format of gyp files')
|
||||
parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
|
||||
default=None, metavar='DIR', type='path',
|
||||
help='directory to use as the root of the source tree')
|
||||
# --no-circular-check disables the check for circular relationships between
|
||||
# .gyp files. These relationships should not exist, but they've only been
|
||||
# observed to be harmful with the Xcode generator. Chromium's .gyp files
|
||||
# currently have some circular relationships on non-Mac platforms, so this
|
||||
# option allows the strict behavior to be used on Macs and the lenient
|
||||
# behavior to be used elsewhere.
|
||||
# TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
|
||||
parser.add_option('--no-circular-check', dest='circular_check',
|
||||
action='store_false', default=True, regenerate=False,
|
||||
help="don't check for circular relationships between files")
|
||||
|
||||
# We read a few things from ~/.gyp, so set up a var for that.
|
||||
home_vars = ['HOME']
|
||||
if sys.platform in ('cygwin', 'win32'):
|
||||
home_vars.append('USERPROFILE')
|
||||
home = None
|
||||
home_dot_gyp = None
|
||||
for home_var in home_vars:
|
||||
home = os.getenv(home_var)
|
||||
if home != None:
|
||||
home_dot_gyp = os.path.join(home, '.gyp')
|
||||
if not os.path.exists(home_dot_gyp):
|
||||
home_dot_gyp = None
|
||||
else:
|
||||
break
|
||||
|
||||
# TODO(thomasvl): add support for ~/.gyp/defaults
|
||||
|
||||
options, build_files_arg = parser.parse_args(args)
|
||||
build_files = build_files_arg
|
||||
|
||||
if not options.formats:
|
||||
# If no format was given on the command line, then check the env variable.
|
||||
generate_formats = []
|
||||
if options.use_environment:
|
||||
generate_formats = os.environ.get('GYP_GENERATORS', [])
|
||||
if generate_formats:
|
||||
generate_formats = re.split('[\s,]', generate_formats)
|
||||
if generate_formats:
|
||||
options.formats = generate_formats
|
||||
else:
|
||||
# Nothing in the variable, default based on platform.
|
||||
if sys.platform == 'darwin':
|
||||
options.formats = ['xcode']
|
||||
elif sys.platform in ('win32', 'cygwin'):
|
||||
options.formats = ['msvs']
|
||||
else:
|
||||
options.formats = ['make']
|
||||
|
||||
if not options.generator_output and options.use_environment:
|
||||
g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
|
||||
if g_o:
|
||||
options.generator_output = g_o
|
||||
|
||||
for mode in options.debug:
|
||||
gyp.debug[mode] = 1
|
||||
|
||||
# Do an extra check to avoid work when we're not debugging.
|
||||
if DEBUG_GENERAL in gyp.debug.keys():
|
||||
DebugOutput(DEBUG_GENERAL, 'running with these options:')
|
||||
for option, value in sorted(options.__dict__.items()):
|
||||
if option[0] == '_':
|
||||
continue
|
||||
if isinstance(value, basestring):
|
||||
DebugOutput(DEBUG_GENERAL, " %s: '%s'" % (option, value))
|
||||
else:
|
||||
DebugOutput(DEBUG_GENERAL, " %s: %s" % (option, str(value)))
|
||||
|
||||
if not build_files:
|
||||
build_files = FindBuildFiles()
|
||||
if not build_files:
|
||||
raise GypError((usage + '\n\n%s: error: no build_file') %
|
||||
(my_name, my_name))
|
||||
|
||||
# TODO(mark): Chromium-specific hack!
|
||||
# For Chromium, the gyp "depth" variable should always be a relative path
|
||||
# to Chromium's top-level "src" directory. If no depth variable was set
|
||||
# on the command line, try to find a "src" directory by looking at the
|
||||
# absolute path to each build file's directory. The first "src" component
|
||||
# found will be treated as though it were the path used for --depth.
|
||||
if not options.depth:
|
||||
for build_file in build_files:
|
||||
build_file_dir = os.path.abspath(os.path.dirname(build_file))
|
||||
build_file_dir_components = build_file_dir.split(os.path.sep)
|
||||
components_len = len(build_file_dir_components)
|
||||
for index in xrange(components_len - 1, -1, -1):
|
||||
if build_file_dir_components[index] == 'src':
|
||||
options.depth = os.path.sep.join(build_file_dir_components)
|
||||
break
|
||||
del build_file_dir_components[index]
|
||||
|
||||
# If the inner loop found something, break without advancing to another
|
||||
# build file.
|
||||
if options.depth:
|
||||
break
|
||||
|
||||
if not options.depth:
|
||||
raise GypError('Could not automatically locate src directory. This is '
               'a temporary Chromium feature that will be removed. Use '
               '--depth as a workaround.')
|
||||
|
||||
# If toplevel-dir is not set, we assume that depth is the root of our source
|
||||
# tree.
|
||||
if not options.toplevel_dir:
|
||||
options.toplevel_dir = options.depth
|
||||
|
||||
# -D on the command line sets variable defaults - D isn't just for define,
|
||||
# it's for default. Perhaps there should be a way to force (-F?) a
|
||||
# variable's value so that it can't be overridden by anything else.
|
||||
cmdline_default_variables = {}
|
||||
defines = []
|
||||
if options.use_environment:
|
||||
defines += ShlexEnv('GYP_DEFINES')
|
||||
if options.defines:
|
||||
defines += options.defines
|
||||
cmdline_default_variables = NameValueListToDict(defines)
|
||||
if DEBUG_GENERAL in gyp.debug.keys():
|
||||
DebugOutput(DEBUG_GENERAL,
|
||||
"cmdline_default_variables: %s" % cmdline_default_variables)
|
||||
|
||||
# Set up includes.
|
||||
includes = []
|
||||
|
||||
# If ~/.gyp/include.gypi exists, it'll be forcibly included into every
|
||||
# .gyp file that's loaded, before anything else is included.
|
||||
if home_dot_gyp != None:
|
||||
default_include = os.path.join(home_dot_gyp, 'include.gypi')
|
||||
if os.path.exists(default_include):
|
||||
print 'Using overrides found in ' + default_include
|
||||
includes.append(default_include)
|
||||
|
||||
# Command-line --include files come after the default include.
|
||||
if options.includes:
|
||||
includes.extend(options.includes)
|
||||
|
||||
# Generator flags should be prefixed with the target generator since they
|
||||
# are global across all generator runs.
|
||||
gen_flags = []
|
||||
if options.use_environment:
|
||||
gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
|
||||
if options.generator_flags:
|
||||
gen_flags += options.generator_flags
|
||||
generator_flags = NameValueListToDict(gen_flags)
|
||||
if DEBUG_GENERAL in gyp.debug.keys():
|
||||
DebugOutput(DEBUG_GENERAL, "generator_flags: %s" % generator_flags)
|
||||
|
||||
# TODO: Remove this and the option after we've gotten folks to move to the
|
||||
# generator flag.
|
||||
if options.msvs_version:
|
||||
print >>sys.stderr, \
|
||||
'DEPRECATED: Use generator flag (-G msvs_version=' + \
|
||||
options.msvs_version + ') instead of --msvs-version=' + \
|
||||
options.msvs_version
|
||||
generator_flags['msvs_version'] = options.msvs_version
|
||||
|
||||
# Generate all requested formats (use a set in case we got one format request
|
||||
# twice)
|
||||
for format in set(options.formats):
|
||||
params = {'options': options,
|
||||
'build_files': build_files,
|
||||
'generator_flags': generator_flags,
|
||||
'cwd': os.getcwd(),
|
||||
'build_files_arg': build_files_arg,
|
||||
'gyp_binary': sys.argv[0],
|
||||
'home_dot_gyp': home_dot_gyp}
|
||||
|
||||
# Start with the default variables from the command line.
|
||||
[generator, flat_list, targets, data] = Load(build_files, format,
|
||||
cmdline_default_variables,
|
||||
includes, options.depth,
|
||||
params, options.check,
|
||||
options.circular_check)
|
||||
|
||||
# TODO(mark): Pass |data| for now because the generator needs a list of
|
||||
# build files that came in. In the future, maybe it should just accept
|
||||
# a list, and not the whole data dict.
|
||||
# NOTE: flat_list is the flattened dependency graph specifying the order
|
||||
# that targets may be built. Build systems that operate serially or that
|
||||
# need to have dependencies defined before dependents reference them should
|
||||
# generate targets in the order specified in flat_list.
|
||||
generator.GenerateOutput(flat_list, targets, data, params)
|
||||
|
||||
# Done
|
||||
return 0
|
||||
|
||||
|
||||
def main(args):
|
||||
try:
|
||||
return gyp_main(args)
|
||||
except GypError, e:
|
||||
sys.stderr.write("gyp: %s\n" % e)
|
||||
return 1
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main(sys.argv[1:]))
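# Editor's sketch; illustrative only, not part of the gyp source. gyp_main()
# is normally reached through a small wrapper script, but an equivalent
# direct call (assuming a hypothetical project.gyp at the source root) is:
#
#   gyp_main(['--depth=.', '-f', 'msvs', '-G', 'msvs_version=2010',
#             'project.gyp'])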
|
461
google-breakpad/src/tools/gyp/pylib/gyp/common.py
Normal file
@ -0,0 +1,461 @@
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
from __future__ import with_statement
|
||||
|
||||
import errno
|
||||
import filecmp
|
||||
import os.path
|
||||
import re
|
||||
import tempfile
|
||||
import sys
|
||||
|
||||
|
||||
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
|
||||
# among other "problems".
|
||||
class memoize(object):
|
||||
def __init__(self, func):
|
||||
self.func = func
|
||||
self.cache = {}
|
||||
def __call__(self, *args):
|
||||
try:
|
||||
return self.cache[args]
|
||||
except KeyError:
|
||||
result = self.func(*args)
|
||||
self.cache[args] = result
|
||||
return result
|
||||
|
||||
|
||||
def ExceptionAppend(e, msg):
|
||||
"""Append a message to the given exception's message."""
|
||||
if not e.args:
|
||||
e.args = (msg,)
|
||||
elif len(e.args) == 1:
|
||||
e.args = (str(e.args[0]) + ' ' + msg,)
|
||||
else:
|
||||
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
|
||||
|
||||
|
||||
def ParseQualifiedTarget(target):
|
||||
# Splits a qualified target into a build file, target name and toolset.
|
||||
|
||||
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
|
||||
target_split = target.rsplit(':', 1)
|
||||
if len(target_split) == 2:
|
||||
[build_file, target] = target_split
|
||||
else:
|
||||
build_file = None
|
||||
|
||||
target_split = target.rsplit('#', 1)
|
||||
if len(target_split) == 2:
|
||||
[target, toolset] = target_split
|
||||
else:
|
||||
toolset = None
|
||||
|
||||
return [build_file, target, toolset]
|
||||
|
||||
|
||||
def ResolveTarget(build_file, target, toolset):
|
||||
# This function resolves a target into a canonical form:
|
||||
# - a fully defined build file, either absolute or relative to the current
|
||||
# directory
|
||||
# - a target name
|
||||
# - a toolset
|
||||
#
|
||||
# build_file is the file relative to which 'target' is defined.
|
||||
# target is the qualified target.
|
||||
# toolset is the default toolset for that target.
|
||||
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
|
||||
|
||||
if parsed_build_file:
|
||||
if build_file:
|
||||
# If a relative path, parsed_build_file is relative to the directory
|
||||
# containing build_file. If build_file is not in the current directory,
|
||||
# parsed_build_file is not a usable path as-is. Resolve it by
|
||||
# interpreting it as relative to build_file. If parsed_build_file is
|
||||
# absolute, it is usable as a path regardless of the current directory,
|
||||
# and os.path.join will return it as-is.
|
||||
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
|
||||
parsed_build_file))
|
||||
# Further (to handle cases like ../cwd), make it relative to cwd.
|
||||
if not os.path.isabs(build_file):
|
||||
build_file = RelativePath(build_file, '.')
|
||||
else:
|
||||
build_file = parsed_build_file
|
||||
|
||||
if parsed_toolset:
|
||||
toolset = parsed_toolset
|
||||
|
||||
return [build_file, target, toolset]
|
||||
|
||||
|
||||
def BuildFile(fully_qualified_target):
|
||||
# Extracts the build file from the fully qualified target.
|
||||
return ParseQualifiedTarget(fully_qualified_target)[0]
|
||||
|
||||
|
||||
def GetEnvironFallback(var_list, default):
|
||||
"""Look up a key in the environment, with fallback to secondary keys
|
||||
and finally falling back to a default value."""
|
||||
for var in var_list:
|
||||
if var in os.environ:
|
||||
return os.environ[var]
|
||||
return default
|
||||
|
||||
|
||||
def QualifiedTarget(build_file, target, toolset):
|
||||
# "Qualified" means the file that a target was defined in and the target
|
||||
# name, separated by a colon, suffixed by a # and the toolset name:
|
||||
# /path/to/file.gyp:target_name#toolset
|
||||
fully_qualified = build_file + ':' + target
|
||||
if toolset:
|
||||
fully_qualified = fully_qualified + '#' + toolset
|
||||
return fully_qualified
|
||||
|
||||
|
||||
@memoize
|
||||
def RelativePath(path, relative_to):
|
||||
# Assuming both |path| and |relative_to| are relative to the current
|
||||
# directory, returns a relative path that identifies path relative to
|
||||
# relative_to.
|
||||
|
||||
# Convert to absolute (and therefore normalized paths).
|
||||
path = os.path.abspath(path)
|
||||
relative_to = os.path.abspath(relative_to)
|
||||
|
||||
# Split the paths into components.
|
||||
path_split = path.split(os.path.sep)
|
||||
relative_to_split = relative_to.split(os.path.sep)
|
||||
|
||||
# Determine how much of the prefix the two paths share.
|
||||
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
|
||||
|
||||
# Put enough ".." components to back up out of relative_to to the common
|
||||
# prefix, and then append the part of path_split after the common prefix.
|
||||
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
|
||||
path_split[prefix_len:]
|
||||
|
||||
if len(relative_split) == 0:
|
||||
# The paths were the same.
|
||||
return ''
|
||||
|
||||
# Turn it back into a string and we're done.
|
||||
return os.path.join(*relative_split)
|
||||
|
||||
|
||||
def FixIfRelativePath(path, relative_to):
|
||||
# Like RelativePath but returns |path| unchanged if it is absolute.
|
||||
if os.path.isabs(path):
|
||||
return path
|
||||
return RelativePath(path, relative_to)
|
||||
|
||||
|
||||
def UnrelativePath(path, relative_to):
|
||||
# Assuming that |relative_to| is relative to the current directory, and |path|
|
||||
# is a path relative to the dirname of |relative_to|, returns a path that
|
||||
# identifies |path| relative to the current directory.
|
||||
rel_dir = os.path.dirname(relative_to)
|
||||
return os.path.normpath(os.path.join(rel_dir, path))
|
||||
|
||||
|
||||
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
|
||||
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
|
||||
# and the documentation for various shells.
|
||||
|
||||
# _quote is a pattern that should match any argument that needs to be quoted
|
||||
# with double-quotes by EncodePOSIXShellArgument. It matches the following
|
||||
# characters appearing anywhere in an argument:
|
||||
# \t, \n, space parameter separators
|
||||
# # comments
|
||||
# $ expansions (quoted to always expand within one argument)
|
||||
# % called out by IEEE 1003.1 XCU.2.2
|
||||
# & job control
|
||||
# ' quoting
|
||||
# (, ) subshell execution
|
||||
# *, ?, [ pathname expansion
|
||||
# ; command delimiter
|
||||
# <, >, | redirection
|
||||
# = assignment
|
||||
# {, } brace expansion (bash)
|
||||
# ~ tilde expansion
|
||||
# It also matches the empty string, because "" (or '') is the only way to
|
||||
# represent an empty string literal argument to a POSIX shell.
|
||||
#
|
||||
# This does not match the characters in _escape, because those need to be
|
||||
# backslash-escaped regardless of whether they appear in a double-quoted
|
||||
# string.
|
||||
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
|
||||
|
||||
# _escape is a pattern that should match any character that needs to be
|
||||
# escaped with a backslash, whether or not the argument matched the _quote
|
||||
# pattern. _escape is used with re.sub to backslash anything in _escape's
|
||||
# first match group, hence the (parentheses) in the regular expression.
|
||||
#
|
||||
# _escape matches the following characters appearing anywhere in an argument:
|
||||
# " to prevent POSIX shells from interpreting this character for quoting
|
||||
# \ to prevent POSIX shells from interpreting this character for escaping
|
||||
# ` to prevent POSIX shells from interpreting this character for command
|
||||
# substitution
|
||||
# Missing from this list is $, because the desired behavior of
|
||||
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
|
||||
#
|
||||
# Also missing from this list is !, which bash will interpret as the history
|
||||
# expansion character when history is enabled. bash does not enable history
|
||||
# by default in non-interactive shells, so this is not thought to be a problem.
|
||||
# ! was omitted from this list because bash interprets "\!" as a literal string
|
||||
# including the backslash character (avoiding history expansion but retaining
|
||||
# the backslash), which would not be correct for argument encoding. Handling
|
||||
# this case properly would also be problematic because bash allows the history
|
||||
# character to be changed with the histchars shell variable. Fortunately,
|
||||
# as history is not enabled in non-interactive shells and
|
||||
# EncodePOSIXShellArgument is only expected to encode for non-interactive
|
||||
# shells, there is no room for error here by ignoring !.
|
||||
_escape = re.compile(r'(["\\`])')
|
||||
|
||||
def EncodePOSIXShellArgument(argument):
|
||||
"""Encodes |argument| suitably for consumption by POSIX shells.
|
||||
|
||||
argument may be quoted and escaped as necessary to ensure that POSIX shells
|
||||
treat the returned value as a literal representing the argument passed to
|
||||
this function. Parameter (variable) expansions beginning with $ are allowed
|
||||
to remain intact without escaping the $, to allow the argument to contain
|
||||
references to variables to be expanded by the shell.
|
||||
"""
|
||||
|
||||
if not isinstance(argument, str):
|
||||
argument = str(argument)
|
||||
|
||||
if _quote.search(argument):
|
||||
quote = '"'
|
||||
else:
|
||||
quote = ''
|
||||
|
||||
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
|
||||
|
||||
return encoded
|
||||
|
||||
|
||||
def EncodePOSIXShellList(list):
|
||||
"""Encodes |list| suitably for consumption by POSIX shells.
|
||||
|
||||
Returns EncodePOSIXShellArgument for each item in list, and joins them
|
||||
together using the space character as an argument separator.
|
||||
"""
|
||||
|
||||
encoded_arguments = []
|
||||
for argument in list:
|
||||
encoded_arguments.append(EncodePOSIXShellArgument(argument))
|
||||
return ' '.join(encoded_arguments)
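# Editor's sketch; illustrative only, not part of the gyp source.
def _ExampleEncodeCommand():
  """Encodes an argument list so a POSIX shell treats each element as a
  single literal word while still allowing $VAR expansion, per the rules
  documented above for _quote and _escape."""
  cmd = ['echo', 'hello world', 'say "hi"', '$OUT/log']
  return EncodePOSIXShellList(cmd)
  # -> echo "hello world" "say \"hi\"" "$OUT/log"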
|
||||
|
||||
|
||||
def DeepDependencyTargets(target_dicts, roots):
|
||||
"""Returns the recursive list of target dependencies."""
|
||||
dependencies = set()
|
||||
pending = set(roots)
|
||||
while pending:
|
||||
# Pluck out one.
|
||||
r = pending.pop()
|
||||
# Skip if visited already.
|
||||
if r in dependencies:
|
||||
continue
|
||||
# Add it.
|
||||
dependencies.add(r)
|
||||
# Add its children.
|
||||
spec = target_dicts[r]
|
||||
pending.update(set(spec.get('dependencies', [])))
|
||||
pending.update(set(spec.get('dependencies_original', [])))
|
||||
return list(dependencies - set(roots))
|
||||
|
||||
|
||||
def BuildFileTargets(target_list, build_file):
|
||||
"""From a target_list, returns the subset from the specified build_file.
|
||||
"""
|
||||
return [p for p in target_list if BuildFile(p) == build_file]
|
||||
|
||||
|
||||
def AllTargets(target_list, target_dicts, build_file):
|
||||
"""Returns all targets (direct and dependencies) for the specified build_file.
|
||||
"""
|
||||
bftargets = BuildFileTargets(target_list, build_file)
|
||||
deptargets = DeepDependencyTargets(target_dicts, bftargets)
|
||||
return bftargets + deptargets
|
||||
|
||||
|
||||
def WriteOnDiff(filename):
|
||||
"""Write to a file only if the new contents differ.
|
||||
|
||||
Arguments:
|
||||
filename: name of the file to potentially write to.
|
||||
Returns:
|
||||
A file like object which will write to temporary file and only overwrite
|
||||
the target if it differs (on close).
|
||||
"""
|
||||
|
||||
class Writer:
|
||||
"""Wrapper around file which only covers the target if it differs."""
|
||||
def __init__(self):
|
||||
# Pick temporary file.
|
||||
tmp_fd, self.tmp_path = tempfile.mkstemp(
|
||||
suffix='.tmp',
|
||||
prefix=os.path.split(filename)[1] + '.gyp.',
|
||||
dir=os.path.split(filename)[0])
|
||||
try:
|
||||
self.tmp_file = os.fdopen(tmp_fd, 'wb')
|
||||
except Exception:
|
||||
# Don't leave turds behind.
|
||||
os.unlink(self.tmp_path)
|
||||
raise
|
||||
|
||||
def __getattr__(self, attrname):
|
||||
# Delegate everything else to self.tmp_file
|
||||
return getattr(self.tmp_file, attrname)
|
||||
|
||||
def close(self):
|
||||
try:
|
||||
# Close tmp file.
|
||||
self.tmp_file.close()
|
||||
# Determine if different.
|
||||
same = False
|
||||
try:
|
||||
same = filecmp.cmp(self.tmp_path, filename, False)
|
||||
except OSError, e:
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
|
||||
if same:
|
||||
# The new file is identical to the old one, just get rid of the new
|
||||
# one.
|
||||
os.unlink(self.tmp_path)
|
||||
else:
|
||||
# The new file is different from the old one, or there is no old one.
|
||||
# Rename the new file to the permanent name.
|
||||
#
|
||||
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
|
||||
# file that can only be read by the owner, regardless of the umask.
|
||||
# There's no reason to not respect the umask here, which means that
|
||||
# an extra hoop is required to fetch it and reset the new file's mode.
|
||||
#
|
||||
# No way to get the umask without setting a new one? Set a safe one
|
||||
# and then set it back to the old value.
|
||||
umask = os.umask(077)
|
||||
os.umask(umask)
|
||||
os.chmod(self.tmp_path, 0666 & ~umask)
|
||||
if sys.platform == 'win32' and os.path.exists(filename):
|
||||
# NOTE: on windows (but not cygwin) rename will not replace an
|
||||
# existing file, so it must be preceded with a remove. Sadly there
|
||||
# is no way to make the switch atomic.
|
||||
os.remove(filename)
|
||||
os.rename(self.tmp_path, filename)
|
||||
except Exception:
|
||||
# Don't leave turds behind.
|
||||
os.unlink(self.tmp_path)
|
||||
raise
|
||||
|
||||
return Writer()
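# Editor's sketch; illustrative only, not part of the gyp source.
def _ExampleWriteOnDiff(path):
  """WriteOnDiff() behaves like an ordinary writable file, but close() only
  replaces |path| when the new contents differ, which keeps timestamps (and
  therefore downstream rebuilds) stable."""
  out = WriteOnDiff(path)
  out.write('# generated file\n')
  out.close()  # compares against any existing file and renames only on change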
|
||||
|
||||
|
||||
def GetFlavor(params):
|
||||
"""Returns |params.flavor| if it's set, the system's default flavor else."""
|
||||
flavors = {
|
||||
'cygwin': 'win',
|
||||
'win32': 'win',
|
||||
'darwin': 'mac',
|
||||
'sunos5': 'solaris',
|
||||
'freebsd7': 'freebsd',
|
||||
'freebsd8': 'freebsd',
|
||||
'freebsd9': 'freebsd',
|
||||
}
|
||||
flavor = flavors.get(sys.platform, 'linux')
|
||||
return params.get('flavor', flavor)
|
||||
|
||||
|
||||
def CopyTool(flavor, out_path):
|
||||
"""Finds (mac|sun|win)_tool.gyp in the gyp directory and copies it
|
||||
to |out_path|."""
|
||||
prefix = { 'solaris': 'sun', 'mac': 'mac', 'win': 'win' }.get(flavor, None)
|
||||
if not prefix:
|
||||
return
|
||||
|
||||
# Slurp input file.
|
||||
source_path = os.path.join(
|
||||
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
|
||||
with open(source_path) as source_file:
|
||||
source = source_file.readlines()
|
||||
|
||||
# Add header and write it out.
|
||||
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
|
||||
with open(tool_path, 'w') as tool_file:
|
||||
tool_file.write(
|
||||
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
|
||||
|
||||
# Make file executable.
|
||||
os.chmod(tool_path, 0755)
|
||||
|
||||
|
||||
# From Alex Martelli,
|
||||
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
|
||||
# ASPN: Python Cookbook: Remove duplicates from a sequence
|
||||
# First comment, dated 2001/10/13.
|
||||
# (Also in the printed Python Cookbook.)
|
||||
|
||||
def uniquer(seq, idfun=None):
|
||||
if idfun is None:
|
||||
idfun = lambda x: x
|
||||
seen = {}
|
||||
result = []
|
||||
for item in seq:
|
||||
marker = idfun(item)
|
||||
if marker in seen: continue
|
||||
seen[marker] = 1
|
||||
result.append(item)
|
||||
return result
|
||||
|
||||
|
||||
class CycleError(Exception):
|
||||
"""An exception raised when an unexpected cycle is detected."""
|
||||
def __init__(self, nodes):
|
||||
self.nodes = nodes
|
||||
def __str__(self):
|
||||
return 'CycleError: cycle involving: ' + str(self.nodes)
|
||||
|
||||
|
||||
def TopologicallySorted(graph, get_edges):
|
||||
"""Topologically sort based on a user provided edge definition.
|
||||
|
||||
Args:
|
||||
graph: A list of node names.
|
||||
get_edges: A function mapping from node name to a hashable collection
|
||||
of node names which this node has outgoing edges to.
|
||||
Returns:
|
||||
A list containing all of the node in graph in topological order.
|
||||
It is assumed that calling get_edges once for each node and caching is
|
||||
cheaper than repeatedly calling get_edges.
|
||||
Raises:
|
||||
CycleError in the event of a cycle.
|
||||
Example:
|
||||
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
|
||||
def GetEdges(node):
|
||||
return re.findall(r'\$\(([^)]*)\)', graph[node])
|
||||
print TopologicallySorted(graph.keys(), GetEdges)
|
||||
==>
|
||||
['a', 'c', 'b']
|
||||
"""
|
||||
get_edges = memoize(get_edges)
|
||||
visited = set()
|
||||
visiting = set()
|
||||
ordered_nodes = []
|
||||
def Visit(node):
|
||||
if node in visiting:
|
||||
raise CycleError(visiting)
|
||||
if node in visited:
|
||||
return
|
||||
visited.add(node)
|
||||
visiting.add(node)
|
||||
for neighbor in get_edges(node):
|
||||
Visit(neighbor)
|
||||
visiting.remove(node)
|
||||
ordered_nodes.insert(0, node)
|
||||
for node in sorted(graph):
|
||||
Visit(node)
|
||||
return ordered_nodes
|
44
google-breakpad/src/tools/gyp/pylib/gyp/common_test.py
Normal file
@ -0,0 +1,44 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""Unit tests for the common.py file."""
|
||||
|
||||
import gyp.common
|
||||
import unittest
|
||||
|
||||
|
||||
class TestTopologicallySorted(unittest.TestCase):
|
||||
def test_Valid(self):
|
||||
"""Test that sorting works on a valid graph with one possible order."""
|
||||
graph = {
|
||||
'a': ['b', 'c'],
|
||||
'b': [],
|
||||
'c': ['d'],
|
||||
'd': ['b'],
|
||||
}
|
||||
def GetEdge(node):
|
||||
return tuple(graph[node])
|
||||
self.assertEqual(
|
||||
gyp.common.TopologicallySorted(graph.keys(), GetEdge),
|
||||
['a', 'c', 'd', 'b'])
|
||||
|
||||
def test_Cycle(self):
|
||||
"""Test that an exception is thrown on a cyclic graph."""
|
||||
graph = {
|
||||
'a': ['b'],
|
||||
'b': ['c'],
|
||||
'c': ['d'],
|
||||
'd': ['a'],
|
||||
}
|
||||
def GetEdge(node):
|
||||
return tuple(graph[node])
|
||||
self.assertRaises(
|
||||
gyp.common.CycleError, gyp.common.TopologicallySorted,
|
||||
graph.keys(), GetEdge)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
148
google-breakpad/src/tools/gyp/pylib/gyp/easy_xml.py
Normal file
@ -0,0 +1,148 @@
|
||||
# Copyright (c) 2011 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
import re
|
||||
|
||||
|
||||
def XmlToString(content, encoding='utf-8', pretty=False):
|
||||
""" Writes the XML content to disk, touching the file only if it has changed.
|
||||
|
||||
Visual Studio files have a lot of pre-defined structures. This function makes
|
||||
it easy to represent these structures as Python data structures, instead of
|
||||
having to create a lot of function calls.
|
||||
|
||||
Each XML element of the content is represented as a list composed of:
|
||||
1. The name of the element, a string,
|
||||
2. The attributes of the element, a dictionary (optional), and
|
||||
3+. The content of the element, if any. Strings are simple text nodes and
|
||||
lists are child elements.
|
||||
|
||||
Example 1:
|
||||
<test/>
|
||||
becomes
|
||||
['test']
|
||||
|
||||
Example 2:
|
||||
<myelement a='value1' b='value2'>
|
||||
<childtype>This is</childtype>
|
||||
<childtype>it!</childtype>
|
||||
</myelement>
|
||||
|
||||
becomes
|
||||
['myelement', {'a':'value1', 'b':'value2'},
|
||||
['childtype', 'This is'],
|
||||
['childtype', 'it!'],
|
||||
]
|
||||
|
||||
Args:
|
||||
content: The structured content to be converted.
|
||||
encoding: The encoding to report on the first XML line.
|
||||
pretty: True if we want pretty printing with indents and new lines.
|
||||
|
||||
Returns:
|
||||
The XML content as a string.
|
||||
"""
|
||||
# We create a huge list of all the elements of the file.
|
||||
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
|
||||
if pretty:
|
||||
xml_parts.append('\n')
|
||||
_ConstructContentList(xml_parts, content, pretty)
|
||||
|
||||
# Convert it to a string
|
||||
return ''.join(xml_parts)
|
||||
|
||||
|
||||
def _ConstructContentList(xml_parts, specification, pretty, level=0):
|
||||
""" Appends the XML parts corresponding to the specification.
|
||||
|
||||
Args:
|
||||
xml_parts: A list of XML parts to be appended to.
|
||||
specification: The specification of the element. See EasyXml docs.
|
||||
pretty: True if we want pretty printing with indents and new lines.
|
||||
level: Indentation level.
|
||||
"""
|
||||
# The first item in a specification is the name of the element.
|
||||
if pretty:
|
||||
indentation = ' ' * level
|
||||
new_line = '\n'
|
||||
else:
|
||||
indentation = ''
|
||||
new_line = ''
|
||||
name = specification[0]
|
||||
if not isinstance(name, str):
|
||||
raise Exception('The first item of an EasyXml specification should be '
|
||||
'a string. Specification was ' + str(specification))
|
||||
xml_parts.append(indentation + '<' + name)
|
||||
|
||||
# Optionally in second position is a dictionary of the attributes.
|
||||
rest = specification[1:]
|
||||
if rest and isinstance(rest[0], dict):
|
||||
for at, val in sorted(rest[0].iteritems()):
|
||||
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val)))
|
||||
rest = rest[1:]
|
||||
if rest:
|
||||
xml_parts.append('>')
|
||||
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
|
||||
multi_line = not all_strings
|
||||
if multi_line and new_line:
|
||||
xml_parts.append(new_line)
|
||||
for child_spec in rest:
|
||||
# If it's a string, append a text node.
|
||||
# Otherwise recurse over that child definition
|
||||
if isinstance(child_spec, str):
|
||||
xml_parts.append(_XmlEscape(child_spec))
|
||||
else:
|
||||
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
|
||||
if multi_line and indentation:
|
||||
xml_parts.append(indentation)
|
||||
xml_parts.append('</%s>%s' % (name, new_line))
|
||||
else:
|
||||
xml_parts.append('/>%s' % new_line)
|
||||
|
||||
|
||||
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False):
|
||||
""" Writes the XML content to disk, touching the file only if it has changed.
|
||||
|
||||
Args:
|
||||
content: The structured content to be written.
|
||||
path: Location of the file.
|
||||
encoding: The encoding to report on the first line of the XML file.
|
||||
pretty: True if we want pretty printing with indents and new lines.
|
||||
"""
|
||||
xml_string = XmlToString(content, encoding, pretty)
|
||||
|
||||
# Get the old content
|
||||
try:
|
||||
f = open(path, 'r')
|
||||
existing = f.read()
|
||||
f.close()
|
||||
except:
|
||||
existing = None
|
||||
|
||||
# It has changed, write it
|
||||
if existing != xml_string:
|
||||
f = open(path, 'w')
|
||||
f.write(xml_string)
|
||||
f.close()
|
||||
|
||||
|
||||
_xml_escape_map = {
  '"': '&quot;',
  "'": '&apos;',
  '<': '&lt;',
  '>': '&gt;',
  '&': '&amp;',
  '\n': '&#xA;',
  '\r': '&#xD;',
}
|
||||
|
||||
|
||||
_xml_escape_re = re.compile(
|
||||
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
|
||||
|
||||
|
||||
def _XmlEscape(value):
|
||||
""" Escape a string for inclusion in XML."""
|
||||
replace = lambda m: _xml_escape_map[m.string[m.start() : m.end()]]
|
||||
return _xml_escape_re.sub(replace, value)
|
102
google-breakpad/src/tools/gyp/pylib/gyp/easy_xml_test.py
Normal file
@ -0,0 +1,102 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright (c) 2011 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
""" Unit tests for the easy_xml.py file. """
|
||||
|
||||
import gyp.easy_xml as easy_xml
|
||||
import unittest
|
||||
import StringIO
|
||||
|
||||
|
||||
class TestSequenceFunctions(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.stderr = StringIO.StringIO()
|
||||
|
||||
def test_EasyXml_simple(self):
|
||||
self.assertEqual(
|
||||
easy_xml.XmlToString(['test']),
|
||||
'<?xml version="1.0" encoding="utf-8"?><test/>')
|
||||
|
||||
self.assertEqual(
|
||||
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
|
||||
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
|
||||
|
||||
def test_EasyXml_simple_with_attributes(self):
|
||||
self.assertEqual(
|
||||
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
|
||||
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
|
||||
|
||||
def test_EasyXml_escaping(self):
|
||||
original = '<test>\'"\r&\nfoo'
|
||||
converted = '&lt;test&gt;&apos;&quot;&#xD;&amp;&#xA;foo'
|
||||
self.assertEqual(
|
||||
easy_xml.XmlToString(['test3', {'a': original}, original]),
|
||||
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
|
||||
(converted, converted))
|
||||
|
||||
def test_EasyXml_pretty(self):
|
||||
self.assertEqual(
|
||||
easy_xml.XmlToString(
|
||||
['test3',
|
||||
['GrandParent',
|
||||
['Parent1',
|
||||
['Child']
|
||||
],
|
||||
['Parent2']
|
||||
]
|
||||
],
|
||||
pretty=True),
|
||||
'<?xml version="1.0" encoding="utf-8"?>\n'
|
||||
'<test3>\n'
|
||||
' <GrandParent>\n'
|
||||
' <Parent1>\n'
|
||||
' <Child/>\n'
|
||||
' </Parent1>\n'
|
||||
' <Parent2/>\n'
|
||||
' </GrandParent>\n'
|
||||
'</test3>\n')
|
||||
|
||||
|
||||
def test_EasyXml_complex(self):
|
||||
# We want to create:
|
||||
target = (
|
||||
'<?xml version="1.0" encoding="utf-8"?>'
|
||||
'<Project>'
|
||||
'<PropertyGroup Label="Globals">'
|
||||
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
|
||||
'<Keyword>Win32Proj</Keyword>'
|
||||
'<RootNamespace>automated_ui_tests</RootNamespace>'
|
||||
'</PropertyGroup>'
|
||||
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
|
||||
'<PropertyGroup '
|
||||
'Condition="'$(Configuration)|$(Platform)'=='
|
||||
''Debug|Win32'" Label="Configuration">'
|
||||
'<ConfigurationType>Application</ConfigurationType>'
|
||||
'<CharacterSet>Unicode</CharacterSet>'
|
||||
'</PropertyGroup>'
|
||||
'</Project>')
|
||||
|
||||
xml = easy_xml.XmlToString(
|
||||
['Project',
|
||||
['PropertyGroup', {'Label': 'Globals'},
|
||||
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
|
||||
['Keyword', 'Win32Proj'],
|
||||
['RootNamespace', 'automated_ui_tests']
|
||||
],
|
||||
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
|
||||
['PropertyGroup',
|
||||
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
|
||||
'Label': 'Configuration'},
|
||||
['ConfigurationType', 'Application'],
|
||||
['CharacterSet', 'Unicode']
|
||||
]
|
||||
])
|
||||
self.assertEqual(xml, target)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
1077
google-breakpad/src/tools/gyp/pylib/gyp/generator/android.py
Normal file
File diff suppressed because it is too large
@ -0,0 +1,66 @@
|
||||
# Copyright (c) 2011 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
import collections
|
||||
import gyp
|
||||
import gyp.common
|
||||
import json
|
||||
import sys
|
||||
|
||||
generator_supports_multiple_toolsets = True
|
||||
|
||||
generator_wants_static_library_dependencies_adjusted = False
|
||||
|
||||
generator_default_variables = {
|
||||
}
|
||||
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
|
||||
'LIB_DIR', 'SHARED_LIB_DIR']:
|
||||
# Some gyp steps fail if these are empty(!).
|
||||
generator_default_variables[dirname] = 'dir'
|
||||
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
|
||||
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
|
||||
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
|
||||
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
|
||||
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX']:
|
||||
generator_default_variables[unused] = ''
|
||||
|
||||
|
||||
def CalculateVariables(default_variables, params):
|
||||
generator_flags = params.get('generator_flags', {})
|
||||
for key, val in generator_flags.items():
|
||||
default_variables.setdefault(key, val)
|
||||
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
|
||||
|
||||
|
||||
def CalculateGeneratorInputInfo(params):
|
||||
"""Calculate the generator specific info that gets fed to input (called by
|
||||
gyp)."""
|
||||
generator_flags = params.get('generator_flags', {})
|
||||
if generator_flags.get('adjust_static_libraries', False):
|
||||
global generator_wants_static_library_dependencies_adjusted
|
||||
generator_wants_static_library_dependencies_adjusted = True
|
||||
|
||||
|
||||
def GenerateOutput(target_list, target_dicts, data, params):
|
||||
# Map of target -> list of targets it depends on.
|
||||
edges = {}
|
||||
|
||||
# Queue of targets to visit.
|
||||
targets_to_visit = target_list[:]
|
||||
|
||||
while len(targets_to_visit) > 0:
|
||||
target = targets_to_visit.pop()
|
||||
if target in edges:
|
||||
continue
|
||||
edges[target] = []
|
||||
|
||||
for dep in target_dicts[target].get('dependencies', []):
|
||||
edges[target].append(dep)
|
||||
targets_to_visit.append(dep)
|
||||
|
||||
filename = 'dump.json'
|
||||
f = open(filename, 'w')
|
||||
json.dump(edges, f)
|
||||
f.close()
|
||||
print 'Wrote json to %s.' % filename
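The dump.json written above maps each qualified target name to the list of targets it depends on directly. A minimal sketch of consuming that file afterwards, assuming only what the generator itself writes (the reporting loop is purely illustrative):

import json

with open('dump.json') as f:
  edges = json.load(f)
# Keys look like 'path/to/foo.gyp:target#toolset'; values are lists of the
# same kind of qualified names.
for target, deps in sorted(edges.items()):
  print '%s depends on %d target(s)' % (target, len(deps))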
|
273
google-breakpad/src/tools/gyp/pylib/gyp/generator/eclipse.py
Normal file
@ -0,0 +1,273 @@
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""GYP backend that generates Eclipse CDT settings files.
|
||||
|
||||
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
|
||||
files that can be imported into an Eclipse CDT project. The XML file contains a
|
||||
list of include paths and symbols (i.e. defines).
|
||||
|
||||
Because a full .cproject definition is not created by this generator, it's not
|
||||
possible to properly define the include dirs and symbols for each file
|
||||
individually. Instead, one set of includes/symbols is generated for the entire
|
||||
project. This works fairly well (and is a vast improvement in general), but may
|
||||
still result in a few indexer issues here and there.
|
||||
|
||||
This generator has no automated tests, so expect it to be broken.
|
||||
"""
|
||||
|
||||
from xml.sax.saxutils import escape
|
||||
import os.path
|
||||
import subprocess
|
||||
import gyp
|
||||
import gyp.common
|
||||
import shlex
|
||||
|
||||
generator_wants_static_library_dependencies_adjusted = False
|
||||
|
||||
generator_default_variables = {
|
||||
}
|
||||
|
||||
for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
|
||||
# Some gyp steps fail if these are empty(!).
|
||||
generator_default_variables[dirname] = 'dir'
|
||||
|
||||
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
|
||||
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
|
||||
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
|
||||
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
|
||||
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
|
||||
'CONFIGURATION_NAME']:
|
||||
generator_default_variables[unused] = ''
|
||||
|
||||
# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
|
||||
# part of the path when dealing with generated headers. This value will be
|
||||
# replaced dynamically for each configuration.
|
||||
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
|
||||
'$SHARED_INTERMEDIATES_DIR'
|
||||
|
||||
|
||||
def CalculateVariables(default_variables, params):
|
||||
generator_flags = params.get('generator_flags', {})
|
||||
for key, val in generator_flags.items():
|
||||
default_variables.setdefault(key, val)
|
||||
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
|
||||
|
||||
|
||||
def CalculateGeneratorInputInfo(params):
|
||||
"""Calculate the generator specific info that gets fed to input (called by
|
||||
gyp)."""
|
||||
generator_flags = params.get('generator_flags', {})
|
||||
if generator_flags.get('adjust_static_libraries', False):
|
||||
global generator_wants_static_library_dependencies_adjusted
|
||||
generator_wants_static_library_dependencies_adjusted = True
|
||||
|
||||
|
||||
def GetAllIncludeDirectories(target_list, target_dicts,
|
||||
shared_intermediates_dir, config_name):
|
||||
"""Calculate the set of include directories to be used.
|
||||
|
||||
Returns:
|
||||
A list including all the include_dir's specified for every target followed
|
||||
by any include directories that were added as cflag compiler options.
|
||||
"""
|
||||
|
||||
gyp_includes_set = set()
|
||||
compiler_includes_list = []
|
||||
|
||||
for target_name in target_list:
|
||||
target = target_dicts[target_name]
|
||||
if config_name in target['configurations']:
|
||||
config = target['configurations'][config_name]
|
||||
|
||||
# Look for any include dirs that were explicitly added via cflags. This
|
||||
# may be done in gyp files to force certain includes to come at the end.
|
||||
# TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
|
||||
# remove this.
|
||||
cflags = config['cflags']
|
||||
for cflag in cflags:
|
||||
include_dir = ''
|
||||
if cflag.startswith('-I'):
|
||||
include_dir = cflag[2:]
|
||||
if include_dir and not include_dir in compiler_includes_list:
|
||||
compiler_includes_list.append(include_dir)
|
||||
|
||||
# Find standard gyp include dirs.
|
||||
if config.has_key('include_dirs'):
|
||||
include_dirs = config['include_dirs']
|
||||
for include_dir in include_dirs:
|
||||
include_dir = include_dir.replace('$SHARED_INTERMEDIATES_DIR',
|
||||
shared_intermediates_dir)
|
||||
if not os.path.isabs(include_dir):
|
||||
base_dir = os.path.dirname(target_name)
|
||||
|
||||
include_dir = base_dir + '/' + include_dir
|
||||
include_dir = os.path.abspath(include_dir)
|
||||
|
||||
if not include_dir in gyp_includes_set:
|
||||
gyp_includes_set.add(include_dir)
|
||||
|
||||
|
||||
# Generate a list that has all the include dirs.
|
||||
all_includes_list = list(gyp_includes_set)
|
||||
all_includes_list.sort()
|
||||
for compiler_include in compiler_includes_list:
|
||||
if not compiler_include in gyp_includes_set:
|
||||
all_includes_list.append(compiler_include)
|
||||
|
||||
# All done.
|
||||
return all_includes_list
|
||||
|
||||
|
||||
def GetCompilerPath(target_list, target_dicts, data):
|
||||
"""Determine a command that can be used to invoke the compiler.
|
||||
|
||||
Returns:
|
||||
If this is a gyp project that has explicit make settings, try to determine
|
||||
the compiler from that. Otherwise, see if a compiler was specified via the
|
||||
CC_target environment variable.
|
||||
"""
|
||||
|
||||
# First, see if the compiler is configured in make's settings.
|
||||
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
|
||||
make_global_settings_dict = data[build_file].get('make_global_settings', {})
|
||||
for key, value in make_global_settings_dict:
|
||||
if key in ['CC', 'CXX']:
|
||||
return value
|
||||
|
||||
# Check to see if the compiler was specified as an environment variable.
|
||||
for key in ['CC_target', 'CC', 'CXX']:
|
||||
compiler = os.environ.get(key)
|
||||
if compiler:
|
||||
return compiler
|
||||
|
||||
return 'gcc'
|
||||
|
||||
|
||||
def GetAllDefines(target_list, target_dicts, data, config_name):
|
||||
"""Calculate the defines for a project.
|
||||
|
||||
Returns:
|
||||
A dict that includes explicit defines declared in gyp files along with all of
|
||||
the default defines that the compiler uses.
|
||||
"""
|
||||
|
||||
# Get defines declared in the gyp files.
|
||||
all_defines = {}
|
||||
for target_name in target_list:
|
||||
target = target_dicts[target_name]
|
||||
|
||||
if config_name in target['configurations']:
|
||||
config = target['configurations'][config_name]
|
||||
for define in config['defines']:
|
||||
split_define = define.split('=', 1)
|
||||
if len(split_define) == 1:
|
||||
split_define.append('1')
|
||||
if split_define[0].strip() in all_defines:
|
||||
# Already defined
|
||||
continue
|
||||
|
||||
all_defines[split_define[0].strip()] = split_define[1].strip()
|
||||
|
||||
# Get default compiler defines (if possible).
|
||||
cc_target = GetCompilerPath(target_list, target_dicts, data)
|
||||
if cc_target:
|
||||
command = shlex.split(cc_target)
|
||||
command.extend(['-E', '-dM', '-'])
|
||||
cpp_proc = subprocess.Popen(args=command, cwd='.',
|
||||
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
|
||||
cpp_output = cpp_proc.communicate()[0]
|
||||
cpp_lines = cpp_output.split('\n')
|
||||
for cpp_line in cpp_lines:
|
||||
if not cpp_line.strip():
|
||||
continue
|
||||
cpp_line_parts = cpp_line.split(' ', 2)
|
||||
key = cpp_line_parts[1]
|
||||
if len(cpp_line_parts) >= 3:
|
||||
val = cpp_line_parts[2]
|
||||
else:
|
||||
val = '1'
|
||||
all_defines[key] = val
|
||||
|
||||
return all_defines
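For reference, the '-E -dM' harvesting above turns preprocessor output lines such as '#define __GNUC__ 4' into dictionary entries. Below is a standalone sketch of that same parsing step; the sample output is invented, since the real lines depend entirely on the toolchain:

# Hypothetical sample of `cc -E -dM -` output.
sample_output = '#define __STDC__ 1\n#define __GNUC__ 4\n#define NDEBUG\n'

defines = {}
for line in sample_output.split('\n'):
  if not line.strip():
    continue
  parts = line.split(' ', 2)  # '#define', name, optional value
  defines[parts[1]] = parts[2] if len(parts) >= 3 else '1'

assert defines == {'__STDC__': '1', '__GNUC__': '4', 'NDEBUG': '1'}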
|
||||
|
||||
|
||||
def WriteIncludePaths(out, eclipse_langs, include_dirs):
|
||||
"""Write the includes section of a CDT settings export file."""
|
||||
|
||||
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
|
||||
'settingswizards.IncludePaths">\n')
|
||||
out.write(' <language name="holder for library settings"></language>\n')
|
||||
for lang in eclipse_langs:
|
||||
out.write(' <language name="%s">\n' % lang)
|
||||
for include_dir in include_dirs:
|
||||
out.write(' <includepath workspace_path="false">%s</includepath>\n' %
|
||||
include_dir)
|
||||
out.write(' </language>\n')
|
||||
out.write(' </section>\n')
|
||||
|
||||
|
||||
def WriteMacros(out, eclipse_langs, defines):
|
||||
"""Write the macros section of a CDT settings export file."""
|
||||
|
||||
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
|
||||
'settingswizards.Macros">\n')
|
||||
out.write(' <language name="holder for library settings"></language>\n')
|
||||
for lang in eclipse_langs:
|
||||
out.write(' <language name="%s">\n' % lang)
|
||||
for key in sorted(defines.iterkeys()):
|
||||
out.write(' <macro><name>%s</name><value>%s</value></macro>\n' %
|
||||
(escape(key), escape(defines[key])))
|
||||
out.write(' </language>\n')
|
||||
out.write(' </section>\n')
|
||||
|
||||
|
||||
def GenerateOutputForConfig(target_list, target_dicts, data, params,
|
||||
config_name):
|
||||
options = params['options']
|
||||
generator_flags = params.get('generator_flags', {})
|
||||
|
||||
# build_dir: relative path from source root to our output files.
|
||||
# e.g. "out/Debug"
|
||||
build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
|
||||
config_name)
|
||||
|
||||
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
|
||||
shared_intermediate_dir = os.path.join(toplevel_build, 'obj', 'gen')
|
||||
|
||||
if not os.path.exists(toplevel_build):
|
||||
os.makedirs(toplevel_build)
|
||||
out = open(os.path.join(toplevel_build, 'eclipse-cdt-settings.xml'), 'w')
|
||||
|
||||
out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
|
||||
out.write('<cdtprojectproperties>\n')
|
||||
|
||||
eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
|
||||
'GNU C++', 'GNU C', 'Assembly']
|
||||
include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
|
||||
shared_intermediate_dir, config_name)
|
||||
WriteIncludePaths(out, eclipse_langs, include_dirs)
|
||||
defines = GetAllDefines(target_list, target_dicts, data, config_name)
|
||||
WriteMacros(out, eclipse_langs, defines)
|
||||
|
||||
out.write('</cdtprojectproperties>\n')
|
||||
out.close()
|
||||
|
||||
|
||||
def GenerateOutput(target_list, target_dicts, data, params):
|
||||
"""Generate an XML settings file that can be imported into a CDT project."""
|
||||
|
||||
if params['options'].generator_output:
|
||||
raise NotImplementedError, "--generator_output not implemented for eclipse"
|
||||
|
||||
user_config = params.get('generator_flags', {}).get('config', None)
|
||||
if user_config:
|
||||
GenerateOutputForConfig(target_list, target_dicts, data, params,
|
||||
user_config)
|
||||
else:
|
||||
config_names = target_dicts[target_list[0]]['configurations'].keys()
|
||||
for config_name in config_names:
|
||||
GenerateOutputForConfig(target_list, target_dicts, data, params,
|
||||
config_name)
|
||||
|
87
google-breakpad/src/tools/gyp/pylib/gyp/generator/gypd.py
Normal file
@ -0,0 +1,87 @@
|
||||
# Copyright (c) 2011 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""gypd output module
|
||||
|
||||
This module produces gyp input as its output. Output files are given the
|
||||
.gypd extension to avoid overwriting the .gyp files that they are generated
|
||||
from. Internal references to .gyp files (such as those found in
|
||||
"dependencies" sections) are not adjusted to point to .gypd files instead;
|
||||
unlike other paths, which are relative to the .gyp or .gypd file, such paths
|
||||
are relative to the directory from which gyp was run to create the .gypd file.
|
||||
|
||||
This generator module is intended to be a sample and a debugging aid, hence
|
||||
the "d" for "debug" in .gypd. It is useful to inspect the results of the
|
||||
various merges, expansions, and conditional evaluations performed by gyp
|
||||
and to see a representation of what would be fed to a generator module.
|
||||
|
||||
It's not advisable to rename .gypd files produced by this module to .gyp,
|
||||
because they will have all merges, expansions, and evaluations already
|
||||
performed and the relevant constructs not present in the output; paths to
|
||||
dependencies may be wrong; and various sections that do not belong in .gyp
|
||||
files such as "included_files" and "*_excluded" will be present.
|
||||
Output will also be stripped of comments. This is not intended to be a
|
||||
general-purpose gyp pretty-printer; for that, you probably just want to
|
||||
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
|
||||
comments but won't do all of the other things done to this module's output.
|
||||
|
||||
The specific formatting of the output generated by this module is subject
|
||||
to change.
|
||||
"""
|
||||
|
||||
|
||||
import gyp.common
|
||||
import errno
|
||||
import os
|
||||
import pprint
|
||||
|
||||
|
||||
# These variables should just be spit back out as variable references.
|
||||
_generator_identity_variables = [
|
||||
'EXECUTABLE_PREFIX',
|
||||
'EXECUTABLE_SUFFIX',
|
||||
'INTERMEDIATE_DIR',
|
||||
'PRODUCT_DIR',
|
||||
'RULE_INPUT_ROOT',
|
||||
'RULE_INPUT_DIRNAME',
|
||||
'RULE_INPUT_EXT',
|
||||
'RULE_INPUT_NAME',
|
||||
'RULE_INPUT_PATH',
|
||||
'SHARED_INTERMEDIATE_DIR',
|
||||
]
|
||||
|
||||
# gypd doesn't define a default value for OS like many other generator
|
||||
# modules. Specify "-D OS=whatever" on the command line to provide a value.
|
||||
generator_default_variables = {
|
||||
}
|
||||
|
||||
# gypd supports multiple toolsets
|
||||
generator_supports_multiple_toolsets = True
|
||||
|
||||
# TODO(mark): This always uses <, which isn't right. The input module should
|
||||
# notify the generator to tell it which phase it is operating in, and this
|
||||
# module should use < for the early phase and then switch to > for the late
|
||||
# phase. Bonus points for carrying @ back into the output too.
|
||||
for v in _generator_identity_variables:
|
||||
generator_default_variables[v] = '<(%s)' % v
|
||||
|
||||
|
||||
def GenerateOutput(target_list, target_dicts, data, params):
|
||||
output_files = {}
|
||||
for qualified_target in target_list:
|
||||
[input_file, target] = \
|
||||
gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
|
||||
|
||||
if input_file[-4:] != '.gyp':
|
||||
continue
|
||||
input_file_stem = input_file[:-4]
|
||||
output_file = input_file_stem + params['options'].suffix + '.gypd'
|
||||
|
||||
if not output_file in output_files:
|
||||
output_files[output_file] = input_file
|
||||
|
||||
for output_file, input_file in output_files.iteritems():
|
||||
output = open(output_file, 'w')
|
||||
pprint.pprint(data[input_file], output)
|
||||
output.close()
|
56
google-breakpad/src/tools/gyp/pylib/gyp/generator/gypsh.py
Normal file
@ -0,0 +1,56 @@
|
||||
# Copyright (c) 2011 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""gypsh output module
|
||||
|
||||
gypsh is a GYP shell. It's not really a generator per se. All it does is
|
||||
fire up an interactive Python session with a few local variables set to the
|
||||
variables passed to the generator. Like gypd, it's intended as a debugging
|
||||
aid, to facilitate the exploration of .gyp structures after being processed
|
||||
by the input module.
|
||||
|
||||
The expected usage is "gyp -f gypsh -D OS=desired_os".
|
||||
"""
|
||||
|
||||
|
||||
import code
|
||||
import sys
|
||||
|
||||
|
||||
# All of this stuff about generator variables was lovingly ripped from gypd.py.
|
||||
# That module has a much better description of what's going on and why.
|
||||
_generator_identity_variables = [
|
||||
'EXECUTABLE_PREFIX',
|
||||
'EXECUTABLE_SUFFIX',
|
||||
'INTERMEDIATE_DIR',
|
||||
'PRODUCT_DIR',
|
||||
'RULE_INPUT_ROOT',
|
||||
'RULE_INPUT_DIRNAME',
|
||||
'RULE_INPUT_EXT',
|
||||
'RULE_INPUT_NAME',
|
||||
'RULE_INPUT_PATH',
|
||||
'SHARED_INTERMEDIATE_DIR',
|
||||
]
|
||||
|
||||
generator_default_variables = {
|
||||
}
|
||||
|
||||
for v in _generator_identity_variables:
|
||||
generator_default_variables[v] = '<(%s)' % v
|
||||
|
||||
|
||||
def GenerateOutput(target_list, target_dicts, data, params):
|
||||
locals = {
|
||||
'target_list': target_list,
|
||||
'target_dicts': target_dicts,
|
||||
'data': data,
|
||||
}
|
||||
|
||||
# Use a banner that looks like the stock Python one and like what
|
||||
# code.interact uses by default, but tack on something to indicate what
|
||||
# locals are available, and identify gypsh.
|
||||
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
|
||||
(sys.version, sys.platform, repr(sorted(locals.keys())))
|
||||
|
||||
code.interact(banner, local=locals)
|
2144
google-breakpad/src/tools/gyp/pylib/gyp/generator/make.py
Normal file
File diff suppressed because it is too large
3135
google-breakpad/src/tools/gyp/pylib/gyp/generator/msvs.py
Normal file
File diff suppressed because it is too large
@ -0,0 +1,35 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright (c) 2011 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
""" Unit tests for the msvs.py file. """
|
||||
|
||||
import gyp.generator.msvs as msvs
|
||||
import unittest
|
||||
import StringIO
|
||||
|
||||
|
||||
class TestSequenceFunctions(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.stderr = StringIO.StringIO()
|
||||
|
||||
def test_GetLibraries(self):
|
||||
self.assertEqual(
|
||||
msvs._GetLibraries({}),
|
||||
[])
|
||||
self.assertEqual(
|
||||
msvs._GetLibraries({'libraries': []}),
|
||||
[])
|
||||
self.assertEqual(
|
||||
msvs._GetLibraries({'other':'foo', 'libraries': ['a.lib']}),
|
||||
['a.lib'])
|
||||
self.assertEqual(
|
||||
msvs._GetLibraries({'libraries': ['a.lib', 'b.lib', 'c.lib', '-lb.lib',
|
||||
'-lb.lib', 'd.lib', 'a.lib']}),
|
||||
['c.lib', 'b.lib', 'd.lib', 'a.lib'])
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
1712
google-breakpad/src/tools/gyp/pylib/gyp/generator/ninja.py
Normal file
File diff suppressed because it is too large
@ -0,0 +1,44 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
""" Unit tests for the ninja.py file. """
|
||||
|
||||
import gyp.generator.ninja as ninja
|
||||
import unittest
|
||||
import StringIO
|
||||
import sys
|
||||
import TestCommon
|
||||
|
||||
|
||||
class TestPrefixesAndSuffixes(unittest.TestCase):
|
||||
if sys.platform in ('win32', 'cygwin'):
|
||||
def test_BinaryNamesWindows(self):
|
||||
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'ninja.build', 'win')
|
||||
spec = { 'target_name': 'wee' }
|
||||
self.assertTrue(writer.ComputeOutputFileName(spec, 'executable').
|
||||
endswith('.exe'))
|
||||
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
|
||||
endswith('.dll'))
|
||||
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
|
||||
endswith('.lib'))
|
||||
|
||||
if sys.platform == 'linux2':
|
||||
def test_BinaryNamesLinux(self):
|
||||
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'ninja.build', 'linux')
|
||||
spec = { 'target_name': 'wee' }
|
||||
self.assertTrue('.' not in writer.ComputeOutputFileName(spec,
|
||||
'executable'))
|
||||
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
|
||||
startswith('lib'))
|
||||
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
|
||||
startswith('lib'))
|
||||
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
|
||||
endswith('.so'))
|
||||
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
|
||||
endswith('.a'))
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
1047
google-breakpad/src/tools/gyp/pylib/gyp/generator/scons.py
Normal file
File diff suppressed because it is too large
1209
google-breakpad/src/tools/gyp/pylib/gyp/generator/xcode.py
Normal file
File diff suppressed because it is too large
2485
google-breakpad/src/tools/gyp/pylib/gyp/input.py
Normal file
File diff suppressed because it is too large
209
google-breakpad/src/tools/gyp/pylib/gyp/mac_tool.py
Normal file
@ -0,0 +1,209 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""Utility functions to perform Xcode-style build steps.
|
||||
|
||||
These functions are executed via gyp-mac-tool when using the Makefile generator.
|
||||
"""
|
||||
|
||||
import fcntl
|
||||
import os
|
||||
import plistlib
|
||||
import re
|
||||
import shutil
|
||||
import string
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
def main(args):
|
||||
executor = MacTool()
|
||||
exit_code = executor.Dispatch(args)
|
||||
if exit_code is not None:
|
||||
sys.exit(exit_code)
|
||||
|
||||
|
||||
class MacTool(object):
|
||||
"""This class performs all the Mac tooling steps. The methods can either be
|
||||
executed directly, or dispatched from an argument list."""
|
||||
|
||||
def Dispatch(self, args):
|
||||
"""Dispatches a string command to a method."""
|
||||
if len(args) < 1:
|
||||
raise Exception("Not enough arguments")
|
||||
|
||||
method = "Exec%s" % self._CommandifyName(args[0])
|
||||
return getattr(self, method)(*args[1:])
|
||||
|
||||
def _CommandifyName(self, name_string):
|
||||
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
|
||||
return name_string.title().replace('-', '')
|
||||
|
||||
def ExecCopyBundleResource(self, source, dest):
|
||||
"""Copies a resource file to the bundle/Resources directory, performing any
|
||||
necessary compilation on each resource."""
|
||||
extension = os.path.splitext(source)[1].lower()
|
||||
if os.path.isdir(source):
|
||||
# Copy tree.
|
||||
if os.path.exists(dest):
|
||||
shutil.rmtree(dest)
|
||||
shutil.copytree(source, dest)
|
||||
elif extension == '.xib':
|
||||
return self._CopyXIBFile(source, dest)
|
||||
elif extension == '.strings':
|
||||
self._CopyStringsFile(source, dest)
|
||||
else:
|
||||
shutil.copyfile(source, dest)
|
||||
|
||||
def _CopyXIBFile(self, source, dest):
|
||||
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
|
||||
tools_dir = os.environ.get('DEVELOPER_BIN_DIR', '/usr/bin')
|
||||
args = [os.path.join(tools_dir, 'ibtool'), '--errors', '--warnings',
|
||||
'--notices', '--output-format', 'human-readable-text', '--compile',
|
||||
dest, source]
|
||||
ibtool_section_re = re.compile(r'/\*.*\*/')
|
||||
ibtool_re = re.compile(r'.*note:.*is clipping its content')
|
||||
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
|
||||
current_section_header = None
|
||||
for line in ibtoolout.stdout:
|
||||
if ibtool_section_re.match(line):
|
||||
current_section_header = line
|
||||
elif not ibtool_re.match(line):
|
||||
if current_section_header:
|
||||
sys.stdout.write(current_section_header)
|
||||
current_section_header = None
|
||||
sys.stdout.write(line)
|
||||
return ibtoolout.returncode
|
||||
|
||||
def _CopyStringsFile(self, source, dest):
|
||||
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
|
||||
input_code = self._DetectInputEncoding(source) or "UTF-8"
|
||||
fp = open(dest, 'w')
|
||||
args = ['/usr/bin/iconv', '--from-code', input_code, '--to-code',
|
||||
'UTF-16', source]
|
||||
subprocess.call(args, stdout=fp)
|
||||
fp.close()
|
||||
|
||||
def _DetectInputEncoding(self, file_name):
|
||||
"""Reads the first few bytes from file_name and tries to guess the text
|
||||
encoding. Returns None as a guess if it can't detect it."""
|
||||
fp = open(file_name, 'rb')
|
||||
try:
|
||||
header = fp.read(3)
|
||||
except Exception:
|
||||
fp.close()
|
||||
return None
|
||||
fp.close()
|
||||
if header.startswith("\xFE\xFF"):
|
||||
return "UTF-16BE"
|
||||
elif header.startswith("\xFF\xFE"):
|
||||
return "UTF-16LE"
|
||||
elif header.startswith("\xEF\xBB\xBF"):
|
||||
return "UTF-8"
|
||||
else:
|
||||
return None
|
||||
|
||||
def ExecCopyInfoPlist(self, source, dest):
|
||||
"""Copies the |source| Info.plist to the destination directory |dest|."""
|
||||
# Read the source Info.plist into memory.
|
||||
fd = open(source, 'r')
|
||||
lines = fd.read()
|
||||
fd.close()
|
||||
|
||||
# Go through all the environment variables and replace them as variables in
|
||||
# the file.
|
||||
for key in os.environ:
|
||||
if key.startswith('_'):
|
||||
continue
|
||||
evar = '${%s}' % key
|
||||
lines = string.replace(lines, evar, os.environ[key])
|
||||
|
||||
# Write out the file with variables replaced.
|
||||
fd = open(dest, 'w')
|
||||
fd.write(lines)
|
||||
fd.close()
|
||||
|
||||
# Now write out PkgInfo file now that the Info.plist file has been
|
||||
# "compiled".
|
||||
self._WritePkgInfo(dest)
|
||||
|
||||
def _WritePkgInfo(self, info_plist):
|
||||
"""This writes the PkgInfo file from the data stored in Info.plist."""
|
||||
plist = plistlib.readPlist(info_plist)
|
||||
if not plist:
|
||||
return
|
||||
|
||||
# Only create PkgInfo for executable types.
|
||||
package_type = plist['CFBundlePackageType']
|
||||
if package_type != 'APPL':
|
||||
return
|
||||
|
||||
# The format of PkgInfo is eight characters, representing the bundle type
|
||||
# and bundle signature, each four characters. If that is missing, four
|
||||
# '?' characters are used instead.
|
||||
signature_code = plist.get('CFBundleSignature', '????')
|
||||
if len(signature_code) != 4: # Wrong length resets everything, too.
|
||||
signature_code = '?' * 4
|
||||
|
||||
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
|
||||
fp = open(dest, 'w')
|
||||
fp.write('%s%s' % (package_type, signature_code))
|
||||
fp.close()
|
||||
|
||||
def ExecFlock(self, lockfile, *cmd_list):
|
||||
"""Emulates the most basic behavior of Linux's flock(1)."""
|
||||
# Rely on exception handling to report errors.
|
||||
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
|
||||
fcntl.flock(fd, fcntl.LOCK_EX)
|
||||
return subprocess.call(cmd_list)
|
||||
|
||||
def ExecFilterLibtool(self, *cmd_list):
|
||||
"""Calls libtool and filters out 'libtool: file: foo.o has no symbols'."""
|
||||
libtool_re = re.compile(r'^libtool: file: .* has no symbols$')
|
||||
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE)
|
||||
for line in libtoolout.stderr:
|
||||
if not libtool_re.match(line):
|
||||
sys.stderr.write(line)
|
||||
return libtoolout.returncode
|
||||
|
||||
def ExecPackageFramework(self, framework, version):
|
||||
"""Takes a path to Something.framework and the Current version of that and
|
||||
sets up all the symlinks."""
|
||||
# Find the name of the binary based on the part before the ".framework".
|
||||
binary = os.path.basename(framework).split('.')[0]
|
||||
|
||||
CURRENT = 'Current'
|
||||
RESOURCES = 'Resources'
|
||||
VERSIONS = 'Versions'
|
||||
|
||||
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
|
||||
# Binary-less frameworks don't seem to contain symlinks (see e.g.
|
||||
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
|
||||
return
|
||||
|
||||
# Move into the framework directory to set the symlinks correctly.
|
||||
pwd = os.getcwd()
|
||||
os.chdir(framework)
|
||||
|
||||
# Set up the Current version.
|
||||
self._Relink(version, os.path.join(VERSIONS, CURRENT))
|
||||
|
||||
# Set up the root symlinks.
|
||||
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
|
||||
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
|
||||
|
||||
# Back to where we were before!
|
||||
os.chdir(pwd)
|
||||
|
||||
def _Relink(self, dest, link):
|
||||
"""Creates a symlink to |dest| named |link|. If |link| already exists,
|
||||
it is overwritten."""
|
||||
if os.path.lexists(link):
|
||||
os.remove(link)
|
||||
os.symlink(dest, link)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main(sys.argv[1:]))
|
691
google-breakpad/src/tools/gyp/pylib/gyp/msvs_emulation.py
Normal file
@ -0,0 +1,691 @@
|
||||
# Copyright (c) 2012 Google Inc. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""
|
||||
This module helps emulate Visual Studio 2008 behavior on top of other
|
||||
build systems, primarily ninja.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
import gyp.MSVSVersion
|
||||
|
||||
windows_quoter_regex = re.compile(r'(\\*)"')
|
||||
|
||||
def QuoteForRspFile(arg):
|
||||
"""Quote a command line argument so that it appears as one argument when
|
||||
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
|
||||
Windows programs)."""
|
||||
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
|
||||
# threads. This is actually the quoting rules for CommandLineToArgvW, not
|
||||
# for the shell, because the shell doesn't do anything in Windows. This
|
||||
# works more or less because most programs (including the compiler, etc.)
|
||||
# use that function to handle command line arguments.
|
||||
|
||||
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
|
||||
# preceding it, and results in n backslashes + the quote. So we substitute
|
||||
# in 2* what we match, +1 more, plus the quote.
|
||||
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
|
||||
|
||||
# %'s also need to be doubled otherwise they're interpreted as batch
|
||||
# positional arguments. Also make sure to escape the % so that they're
|
||||
# passed literally through escaping so they can be singled to just the
|
||||
# original %. Otherwise, trying to pass the literal representation that
|
||||
# looks like an environment variable to the shell (e.g. %PATH%) would fail.
|
||||
arg = arg.replace('%', '%%')
|
||||
|
||||
# These commands are used in rsp files, so no escaping for the shell (via ^)
|
||||
# is necessary.
|
||||
|
||||
# Finally, wrap the whole thing in quotes so that the above quote rule
|
||||
# applies and whitespace isn't a word break.
|
||||
return '"' + arg + '"'
|
||||
|
||||
|
||||
def EncodeRspFileList(args):
|
||||
"""Process a list of arguments using QuoteCmdExeArgument."""
|
||||
# Note that the first argument is assumed to be the command. Don't add
|
||||
# quotes around it because then built-ins like 'echo', etc. won't work.
|
||||
# Take care to normpath only the path in the case of 'call ../x.bat' because
|
||||
# otherwise the whole thing is incorrectly interpreted as a path and not
|
||||
# normalized correctly.
|
||||
if not args: return ''
|
||||
if args[0].startswith('call '):
|
||||
call, program = args[0].split(' ', 1)
|
||||
program = call + ' ' + os.path.normpath(program)
|
||||
else:
|
||||
program = os.path.normpath(args[0])
|
||||
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
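A minimal usage sketch of the quoting rules implemented above; it assumes gyp's pylib directory is on sys.path, and the sample arguments are invented for illustration:

from gyp.msvs_emulation import QuoteForRspFile

assert QuoteForRspFile('simple') == '"simple"'
# An embedded quote is escaped per the 2n+1 backslash rule (here n == 0).
assert QuoteForRspFile('say "hi"') == '"say \\"hi\\""'
# Percent signs are doubled so cmd.exe does not expand them as variables.
assert QuoteForRspFile('%PATH%') == '"%%PATH%%"'
# Backslashes not followed by a quote pass through untouched.
assert QuoteForRspFile('C:\\some\\path') == '"C:\\some\\path"'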
|
||||
|
||||
|
||||
def _GenericRetrieve(root, default, path):
|
||||
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
|
||||
value at path, or return |default| if any of the path doesn't exist."""
|
||||
if not root:
|
||||
return default
|
||||
if not path:
|
||||
return root
|
||||
return _GenericRetrieve(root.get(path[0]), default, path[1:])
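As a quick illustration of the lookup helper just defined (the settings dict is invented for the example):

from gyp.msvs_emulation import _GenericRetrieve

settings = {'VCCLCompilerTool': {'WarningLevel': '3'}}
assert _GenericRetrieve(settings, None, ['VCCLCompilerTool', 'WarningLevel']) == '3'
# A missing branch or leaf falls back to the supplied default.
assert _GenericRetrieve(settings, [], ['VCLinkerTool', 'SubSystem']) == []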
|
||||
|
||||
|
||||
def _AddPrefix(element, prefix):
|
||||
"""Add |prefix| to |element| or each subelement if element is iterable."""
|
||||
if element is None:
|
||||
return element
|
||||
# Note, not Iterable because we don't want to handle strings like that.
|
||||
if isinstance(element, list) or isinstance(element, tuple):
|
||||
return [prefix + e for e in element]
|
||||
else:
|
||||
return prefix + element
|
||||
|
||||
|
||||
def _DoRemapping(element, map):
|
||||
"""If |element| then remap it through |map|. If |element| is iterable then
|
||||
each item will be remapped. Any elements not found will be removed."""
|
||||
if map is not None and element is not None:
|
||||
if not callable(map):
|
||||
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
|
||||
if isinstance(element, list) or isinstance(element, tuple):
|
||||
element = filter(None, [map(elem) for elem in element])
|
||||
else:
|
||||
element = map(element)
|
||||
return element
|
||||
|
||||
|
||||
def _AppendOrReturn(append, element):
|
||||
"""If |append| is None, simply return |element|. If |append| is not None,
|
||||
then add |element| to it, adding each item in |element| if it's a list or
|
||||
tuple."""
|
||||
if append is not None and element is not None:
|
||||
if isinstance(element, list) or isinstance(element, tuple):
|
||||
append.extend(element)
|
||||
else:
|
||||
append.append(element)
|
||||
else:
|
||||
return element
|
||||
|
||||
|
||||
def _FindDirectXInstallation():
|
||||
"""Try to find an installation location for the DirectX SDK. Check for the
|
||||
standard environment variable, and if that doesn't exist, try to find
|
||||
via the registry. May return None if not found in either location."""
|
||||
# Return previously calculated value, if there is one
|
||||
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
|
||||
return _FindDirectXInstallation.dxsdk_dir
|
||||
|
||||
dxsdk_dir = os.environ.get('DXSDK_DIR')
|
||||
if not dxsdk_dir:
|
||||
# Setup params to pass to and attempt to launch reg.exe.
|
||||
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
|
||||
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
for line in p.communicate()[0].splitlines():
|
||||
if 'InstallPath' in line:
|
||||
dxsdk_dir = line.split(' ')[3] + "\\"
|
||||
|
||||
# Cache return value
|
||||
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
|
||||
return dxsdk_dir
|
||||
|
||||
|
||||
class MsvsSettings(object):
|
||||
"""A class that understands the gyp 'msvs_...' values (especially the
|
||||
msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
|
||||
class helps map those settings to command line options."""
|
||||
|
||||
def __init__(self, spec, generator_flags):
|
||||
self.spec = spec
|
||||
self.vs_version = GetVSVersion(generator_flags)
|
||||
self.dxsdk_dir = _FindDirectXInstallation()
|
||||
|
||||
# Try to find an installation location for the Windows DDK by checking
|
||||
# the WDK_DIR environment variable, may be None.
|
||||
self.wdk_dir = os.environ.get('WDK_DIR')
|
||||
|
||||
supported_fields = [
|
||||
('msvs_configuration_attributes', dict),
|
||||
('msvs_settings', dict),
|
||||
('msvs_system_include_dirs', list),
|
||||
('msvs_disabled_warnings', list),
|
||||
('msvs_precompiled_header', str),
|
||||
('msvs_precompiled_source', str),
|
||||
('msvs_target_platform', str),
|
||||
]
|
||||
configs = spec['configurations']
|
||||
for field, default in supported_fields:
|
||||
setattr(self, field, {})
|
||||
for configname, config in configs.iteritems():
|
||||
getattr(self, field)[configname] = config.get(field, default())
|
||||
|
||||
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
|
||||
|
||||
def GetVSMacroEnv(self, base_to_build=None, config=None):
|
||||
"""Get a dict of variables mapping internal VS macro names to their gyp
|
||||
equivalents."""
|
||||
target_platform = self.GetTargetPlatform(config)
|
||||
target_platform = {'x86': 'Win32'}.get(target_platform, target_platform)
|
||||
replacements = {
|
||||
'$(VSInstallDir)': self.vs_version.Path(),
|
||||
'$(VCInstallDir)': os.path.join(self.vs_version.Path(), 'VC') + '\\',
|
||||
'$(OutDir)\\': base_to_build + '\\' if base_to_build else '',
|
||||
'$(IntDir)': '$!INTERMEDIATE_DIR',
|
||||
'$(InputPath)': '${source}',
|
||||
'$(InputName)': '${root}',
|
||||
'$(ProjectName)': self.spec['target_name'],
|
||||
'$(PlatformName)': target_platform,
|
||||
'$(ProjectDir)\\': '',
|
||||
}
|
||||
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
|
||||
# set. This happens when the SDK is sync'd via src-internal, rather than
|
||||
# by typical end-user installation of the SDK. If it's not set, we don't
|
||||
# want to leave the unexpanded variable in the path, so simply strip it.
|
||||
replacements['$(DXSDK_DIR)'] = self.dxsdk_dir if self.dxsdk_dir else ''
|
||||
replacements['$(WDK_DIR)'] = self.wdk_dir if self.wdk_dir else ''
|
||||
return replacements
|
||||
|
||||
def ConvertVSMacros(self, s, base_to_build=None, config=None):
|
||||
"""Convert from VS macro names to something equivalent."""
|
||||
env = self.GetVSMacroEnv(base_to_build, config=config)
|
||||
return ExpandMacros(s, env)
|
||||
|
||||
def AdjustLibraries(self, libraries):
|
||||
"""Strip -l from library if it's specified with that."""
|
||||
return [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
|
||||
|
||||
def _GetAndMunge(self, field, path, default, prefix, append, map):
|
||||
"""Retrieve a value from |field| at |path| or return |default|. If
|
||||
|append| is specified, and the item is found, it will be appended to that
|
||||
object instead of returned. If |map| is specified, results will be
|
||||
remapped through |map| before being returned or appended."""
|
||||
result = _GenericRetrieve(field, default, path)
|
||||
result = _DoRemapping(result, map)
|
||||
result = _AddPrefix(result, prefix)
|
||||
return _AppendOrReturn(append, result)
|
||||
|
||||
class _GetWrapper(object):
|
||||
def __init__(self, parent, field, base_path, append=None):
|
||||
self.parent = parent
|
||||
self.field = field
|
||||
self.base_path = [base_path]
|
||||
self.append = append
|
||||
def __call__(self, name, map=None, prefix='', default=None):
|
||||
return self.parent._GetAndMunge(self.field, self.base_path + [name],
|
||||
default=default, prefix=prefix, append=self.append, map=map)
|
||||
|
||||
def GetTargetPlatform(self, config):
|
||||
target_platform = self.msvs_target_platform.get(config, '')
|
||||
if not target_platform:
|
||||
target_platform = 'Win32'
|
||||
return {'Win32': 'x86'}.get(target_platform, target_platform)
|
||||
|
||||
def _RealConfig(self, config):
|
||||
target_platform = self.GetTargetPlatform(config)
|
||||
if target_platform == 'x64' and not config.endswith('_x64'):
|
||||
config += '_x64'
|
||||
return config
|
||||
|
||||
def _Setting(self, path, config,
|
||||
default=None, prefix='', append=None, map=None):
|
||||
"""_GetAndMunge for msvs_settings."""
|
||||
config = self._RealConfig(config)
|
||||
return self._GetAndMunge(
|
||||
self.msvs_settings[config], path, default, prefix, append, map)
|
||||
|
||||
def _ConfigAttrib(self, path, config,
|
||||
default=None, prefix='', append=None, map=None):
|
||||
"""_GetAndMunge for msvs_configuration_attributes."""
|
||||
config = self._RealConfig(config)
|
||||
return self._GetAndMunge(
|
||||
self.msvs_configuration_attributes[config],
|
||||
path, default, prefix, append, map)
|
||||
|
||||
def AdjustIncludeDirs(self, include_dirs, config):
|
||||
"""Updates include_dirs to expand VS specific paths, and adds the system
|
||||
include dirs used for platform SDK and similar."""
|
||||
config = self._RealConfig(config)
|
||||
includes = include_dirs + self.msvs_system_include_dirs[config]
|
||||
includes.extend(self._Setting(
|
||||
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
|
||||
return [self.ConvertVSMacros(p, config=config) for p in includes]
|
||||
|
||||
def GetComputedDefines(self, config):
|
||||
"""Returns the set of defines that are injected to the defines list based
|
||||
on other VS settings."""
|
||||
config = self._RealConfig(config)
|
||||
defines = []
|
||||
if self._ConfigAttrib(['CharacterSet'], config) == '1':
|
||||
defines.extend(('_UNICODE', 'UNICODE'))
|
||||
if self._ConfigAttrib(['CharacterSet'], config) == '2':
|
||||
defines.append('_MBCS')
|
||||
defines.extend(self._Setting(
|
||||
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
|
||||
return defines
|
||||
|
||||
def GetOutputName(self, config, expand_special):
|
||||
"""Gets the explicitly overridden output name for a target or returns None
|
||||
if it's not overridden."""
|
||||
config = self._RealConfig(config)
|
||||
type = self.spec['type']
|
||||
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
|
||||
# TODO(scottmg): Handle OutputDirectory without OutputFile.
|
||||
output_file = self._Setting((root, 'OutputFile'), config)
|
||||
if output_file:
|
||||
output_file = expand_special(self.ConvertVSMacros(
|
||||
output_file, config=config))
|
||||
return output_file
|
||||
|
||||
def GetCflags(self, config):
|
||||
"""Returns the flags that need to be added to .c and .cc compilations."""
|
||||
config = self._RealConfig(config)
|
||||
cflags = []
|
||||
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
|
||||
cl = self._GetWrapper(self, self.msvs_settings[config],
|
||||
'VCCLCompilerTool', append=cflags)
|
||||
cl('Optimization',
|
||||
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O')
|
||||
cl('InlineFunctionExpansion', prefix='/Ob')
|
||||
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
|
||||
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
|
||||
cl('WholeProgramOptimization', map={'true': '/GL'})
|
||||
cl('WarningLevel', prefix='/W')
|
||||
cl('WarnAsError', map={'true': '/WX'})
|
||||
cl('DebugInformationFormat',
|
||||
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
|
||||
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
|
||||
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
|
||||
cl('MinimalRebuild', map={'true': '/Gm'})
|
||||
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
|
||||
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
|
||||
cl('RuntimeLibrary',
|
||||
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
|
||||
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
|
||||
cl('AdditionalOptions', prefix='')
|
||||
# ninja handles parallelism by itself, don't have the compiler do it too.
|
||||
cflags = filter(lambda x: not x.startswith('/MP'), cflags)
|
||||
return cflags
|
||||
|
||||
def GetPrecompiledHeader(self, config, gyp_to_build_path):
|
||||
"""Returns an object that handles the generation of precompiled header
|
||||
build steps."""
|
||||
config = self._RealConfig(config)
|
||||
return _PchHelper(self, config, gyp_to_build_path)
|
||||
|
||||
def _GetPchFlags(self, config, extension):
|
||||
"""Get the flags to be added to the cflags for precompiled header support.
|
||||
"""
|
||||
config = self._RealConfig(config)
|
||||
# The PCH is only built once by a particular source file. Usage of PCH must
|
||||
# only be for the same language (i.e. C vs. C++), so only include the pch
|
||||
# flags when the language matches.
|
||||
if self.msvs_precompiled_header[config]:
|
||||
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
|
||||
if _LanguageMatchesForPch(source_ext, extension):
|
||||
pch = os.path.split(self.msvs_precompiled_header[config])[1]
|
||||
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
|
||||
return []
|
||||
|
||||
def GetCflagsC(self, config):
|
||||
"""Returns the flags that need to be added to .c compilations."""
|
||||
config = self._RealConfig(config)
|
||||
return self._GetPchFlags(config, '.c')
|
||||
|
||||
def GetCflagsCC(self, config):
|
||||
"""Returns the flags that need to be added to .cc compilations."""
|
||||
config = self._RealConfig(config)
|
||||
return ['/TP'] + self._GetPchFlags(config, '.cc')
|
||||
|
||||
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
|
||||
"""Get and normalize the list of paths in AdditionalLibraryDirectories
|
||||
setting."""
|
||||
config = self._RealConfig(config)
|
||||
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
|
||||
config, default=[])
|
||||
libpaths = [os.path.normpath(
|
||||
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
|
||||
for p in libpaths]
|
||||
return ['/LIBPATH:"' + p + '"' for p in libpaths]
|
||||
|
||||
def GetLibFlags(self, config, gyp_to_build_path):
|
||||
"""Returns the flags that need to be added to lib commands."""
|
||||
config = self._RealConfig(config)
|
||||
libflags = []
|
||||
lib = self._GetWrapper(self, self.msvs_settings[config],
|
||||
'VCLibrarianTool', append=libflags)
|
||||
libflags.extend(self._GetAdditionalLibraryDirectories(
|
||||
'VCLibrarianTool', config, gyp_to_build_path))
|
||||
lib('AdditionalOptions')
|
||||
return libflags
|
||||
|
||||
def _GetDefFileAsLdflags(self, spec, ldflags, gyp_to_build_path):
|
||||
""".def files get implicitly converted to a ModuleDefinitionFile for the
|
||||
    linker in the VS generator. Emulate that behaviour here."""
    def_file = ''
    if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
      def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
      if len(def_files) == 1:
        ldflags.append('/DEF:"%s"' % gyp_to_build_path(def_files[0]))
      elif len(def_files) > 1:
        raise Exception("Multiple .def files")

  def GetLdflags(self, config, gyp_to_build_path, expand_special,
                 manifest_base_name, is_executable):
    """Returns the flags that need to be added to link commands, and the
    manifest files."""
    config = self._RealConfig(config)
    ldflags = []
    ld = self._GetWrapper(self, self.msvs_settings[config],
                          'VCLinkerTool', append=ldflags)
    self._GetDefFileAsLdflags(self.spec, ldflags, gyp_to_build_path)
    ld('GenerateDebugInformation', map={'true': '/DEBUG'})
    ld('TargetMachine', map={'1': 'X86', '17': 'X64'}, prefix='/MACHINE:')
    ldflags.extend(self._GetAdditionalLibraryDirectories(
        'VCLinkerTool', config, gyp_to_build_path))
    ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
    out = self.GetOutputName(config, expand_special)
    if out:
      ldflags.append('/OUT:' + out)
    ld('AdditionalOptions', prefix='')
    ld('SubSystem', map={'1': 'CONSOLE', '2': 'WINDOWS'}, prefix='/SUBSYSTEM:')
    ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
    ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
    ld('RandomizedBaseAddress',
       map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
    ld('DataExecutionPrevention',
       map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
    ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
    ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
    ld('LinkTimeCodeGeneration', map={'1': '/LTCG'})
    ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
    ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
    ld('EntryPointSymbol', prefix='/ENTRY:')
    # TODO(scottmg): This should sort of be somewhere else (not really a flag).
    ld('AdditionalDependencies', prefix='')
    # TODO(scottmg): These too.
    ldflags.extend(('kernel32.lib', 'user32.lib', 'gdi32.lib', 'winspool.lib',
                    'comdlg32.lib', 'advapi32.lib', 'shell32.lib', 'ole32.lib',
                    'oleaut32.lib', 'uuid.lib', 'odbc32.lib', 'DelayImp.lib'))

    # If the base address is not specifically controlled, DYNAMICBASE should
    # be on by default.
    base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
                        ldflags)
    if not base_flags:
      ldflags.append('/DYNAMICBASE')

    # If the NXCOMPAT flag has not been specified, default to on. Despite the
    # documentation that says this only defaults to on when the subsystem is
    # Vista or greater (which applies to the linker), the IDE defaults it on
    # unless it's explicitly off.
    if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
      ldflags.append('/NXCOMPAT')

    have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
    manifest_flags, intermediate_manifest_file = self._GetLdManifestFlags(
        config, manifest_base_name, is_executable and not have_def_file)
    ldflags.extend(manifest_flags)
    manifest_files = self._GetAdditionalManifestFiles(config, gyp_to_build_path)
    manifest_files.append(intermediate_manifest_file)

    return ldflags, manifest_files

  def _GetLdManifestFlags(self, config, name, allow_isolation):
    """Returns the set of flags that need to be added to the link to generate
    a default manifest, as well as the name of the generated file."""
    # Add manifest flags that mirror the defaults in VS. Chromium dev builds
    # do not currently use any non-default settings, but we could parse
    # VCManifestTool blocks if Chromium or other projects need them in the
    # future. Of particular note, we do not yet support EmbedManifest because
    # it complicates incremental linking.
    output_name = name + '.intermediate.manifest'
    flags = [
      '/MANIFEST',
      '/ManifestFile:' + output_name,
      '''/MANIFESTUAC:"level='asInvoker' uiAccess='false'"'''
    ]
    if allow_isolation:
      flags.append('/ALLOWISOLATION')
    return flags, output_name

  def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
    """Gets additional manifest files that are added to the default one
    generated by the linker."""
    files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
                          default=[])
    if (self._Setting(
        ('VCManifestTool', 'EmbedManifest'), config, default='') == 'true'):
      print 'gyp/msvs_emulation.py: "EmbedManifest: true" not yet supported.'
    if isinstance(files, str):
      files = files.split(';')
    return [os.path.normpath(
        gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
        for f in files]

  def IsUseLibraryDependencyInputs(self, config):
    """Returns whether the target should be linked via Use Library Dependency
    Inputs (using component .objs of a given .lib)."""
    config = self._RealConfig(config)
    uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
    return uldi == 'true'

  def GetRcflags(self, config, gyp_to_ninja_path):
    """Returns the flags that need to be added to invocations of the resource
    compiler."""
    config = self._RealConfig(config)
    rcflags = []
    rc = self._GetWrapper(self, self.msvs_settings[config],
                          'VCResourceCompilerTool', append=rcflags)
    rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
    rcflags.append('/I' + gyp_to_ninja_path('.'))
    rc('PreprocessorDefinitions', prefix='/d')
    # /l arg must be in hex without leading '0x'
    rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
    return rcflags

  def BuildCygwinBashCommandLine(self, args, path_to_base):
    """Build a command line that runs args via cygwin bash. We assume that all
    incoming paths are in Windows normpath'd form, so they need to be
    converted to posix style for the part of the command line that's passed to
    bash. We also have to do some Visual Studio macro emulation here because
    various rules use magic VS names for things. Also note that rules that
    contain ninja variables cannot be fixed here (for example ${source}), so
    the outer generator needs to make sure that the paths that are written out
    are in posix style, if the command line will be used here."""
    cygwin_dir = os.path.normpath(
        os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
    cd = ('cd %s' % path_to_base).replace('\\', '/')
    args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
    args = ["'%s'" % a.replace("'", "'\\''") for a in args]
    bash_cmd = ' '.join(args)
    cmd = (
        'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
        'bash -c "%s ; %s"' % (cd, bash_cmd))
    return cmd

  def IsRuleRunUnderCygwin(self, rule):
    """Determine if an action should be run under cygwin. If the variable is
    unset, or set to 1 we use cygwin."""
    return int(rule.get('msvs_cygwin_shell',
                        self.spec.get('msvs_cygwin_shell', 1))) != 0

  def HasExplicitIdlRules(self, spec):
    """Determine if there's an explicit rule for idl files. When there isn't we
    need to generate implicit rules to build MIDL .idl files."""
    for rule in spec.get('rules', []):
      if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
        return True
    return False

  def GetIdlBuildData(self, source, config):
    """Determine the implicit outputs for an idl file. Returns output
    directory, outputs, and variables and flags that are required."""
    config = self._RealConfig(config)
    midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
    def midl(name, default=None):
      return self.ConvertVSMacros(midl_get(name, default=default),
                                  config=config)
    tlb = midl('TypeLibraryName', default='${root}.tlb')
    header = midl('HeaderFileName', default='${root}.h')
    dlldata = midl('DLLDataFileName', default='dlldata.c')
    iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
    proxy = midl('ProxyFileName', default='${root}_p.c')
    # Note that .tlb is not included in the outputs as it is not always
    # generated depending on the content of the input idl file.
    outdir = midl('OutputDirectory', default='')
    output = [header, dlldata, iid, proxy]
    variables = [('tlb', tlb),
                 ('h', header),
                 ('dlldata', dlldata),
                 ('iid', iid),
                 ('proxy', proxy)]
    # TODO(scottmg): Are there configuration settings to set these flags?
    flags = ['/char', 'signed', '/env', 'win32', '/Oicf']
    return outdir, output, variables, flags


def _LanguageMatchesForPch(source_ext, pch_source_ext):
  c_exts = ('.c',)
  cc_exts = ('.cc', '.cxx', '.cpp')
  return ((source_ext in c_exts and pch_source_ext in c_exts) or
          (source_ext in cc_exts and pch_source_ext in cc_exts))

class PrecompiledHeader(object):
  """Helper to generate dependencies and build rules to handle generation of
  precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
  """
  def __init__(self, settings, config, gyp_to_build_path):
    self.settings = settings
    self.config = config
    self.gyp_to_build_path = gyp_to_build_path

  def _PchHeader(self):
    """Get the header that will appear in an #include line for all source
    files."""
    return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]

  def _PchSource(self):
    """Get the source file that is built once to compile the pch data."""
    return self.gyp_to_build_path(
        self.settings.msvs_precompiled_source[self.config])

  def _PchOutput(self):
    """Get the name of the output of the compiled pch data."""
    return '${pchprefix}.' + self._PchHeader() + '.pch'

  def GetObjDependencies(self, sources, objs):
    """Given a list of source files and the corresponding object files,
    returns a list of the pch files that should be depended upon. The
    additional wrapping in the return value is for interface compatibility
    with make.py on Mac, and xcode_emulation.py."""
    if not self._PchHeader():
      return []
    source = self._PchSource()
    assert source
    pch_ext = os.path.splitext(self._PchSource())[1]
    for source in sources:
      if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
        return [(None, None, self._PchOutput())]
    return []

  def GetPchBuildCommands(self):
    """Returns [(path_to_pch, language_flag, language, header)].
    |path_to_gch| and |header| are relative to the build directory."""
    header = self._PchHeader()
    source = self._PchSource()
    if not source or not header:
      return []
    ext = os.path.splitext(source)[1]
    lang = 'c' if ext == '.c' else 'cc'
    return [(self._PchOutput(), '/Yc' + header, lang, source)]


vs_version = None
def GetVSVersion(generator_flags):
  global vs_version
  if not vs_version:
    vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
        generator_flags.get('msvs_version', 'auto'))
  return vs_version

def _GetVsvarsSetupArgs(generator_flags, arch):
  vs = GetVSVersion(generator_flags)
  return vs.SetupScript()

def ExpandMacros(string, expansions):
  """Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
  for the canonical way to retrieve a suitable dict."""
  if '$' in string:
    for old, new in expansions.iteritems():
      assert '$(' not in new, new
      string = string.replace(old, new)
  return string

def _ExtractImportantEnvironment(output_of_set):
  """Extracts environment variables required for the toolchain to run from
  a textual dump output by the cmd.exe 'set' command."""
  envvars_to_save = (
      'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
      'include',
      'lib',
      'libpath',
      'path',
      'pathext',
      'systemroot',
      'temp',
      'tmp',
      )
  env = {}
  for line in output_of_set.splitlines():
    for envvar in envvars_to_save:
      if re.match(envvar + '=', line.lower()):
        var, setting = line.split('=', 1)
        if envvar == 'path':
          # Our own rules (for running gyp-win-tool) and other actions in
          # Chromium rely on python being in the path. Add the path to this
          # python here so that if it's not in the path when ninja is run
          # later, python will still be found.
          setting = os.path.dirname(sys.executable) + os.pathsep + setting
        env[var.upper()] = setting
        break
  for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
    if required not in env:
      raise Exception('Environment variable "%s" '
                      'required to be set to valid path' % required)
  return env

def _FormatAsEnvironmentBlock(envvar_dict):
  """Format as an 'environment block' directly suitable for CreateProcess.
  Briefly this is a list of key=value\0, terminated by an additional \0. See
  CreateProcess documentation for more details."""
  block = ''
  nul = '\0'
  for key, value in envvar_dict.iteritems():
    block += key + '=' + value + nul
  block += nul
  return block

def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags, open_out):
  """It's not sufficient to have the absolute path to the compiler, linker,
  etc. on Windows, as those tools rely on .dlls being in the PATH. We also
  need to support both x86 and x64 compilers within the same build (to support
  msvs_target_platform hackery). Different architectures require a different
  compiler binary, and different supporting environment variables (INCLUDE,
  LIB, LIBPATH). So, we extract the environment here, wrap all invocations
  of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
  sets up the environment, and then we do not prefix the compiler with
  an absolute path, instead preferring something like "cl.exe" in the rule
  which will then run whichever the environment setup has put in the path."""
  vs = GetVSVersion(generator_flags)
  for arch in ('x86', 'x64'):
    args = vs.SetupScript(arch)
    args.extend(('&&', 'set'))
    popen = subprocess.Popen(
        args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    variables, _ = popen.communicate()
    env = _ExtractImportantEnvironment(variables)
    env_block = _FormatAsEnvironmentBlock(env)
    f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
    f.write(env_block)
    f.close()
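A minimal sketch, not part of this commit, of the environment-block round trip that GenerateEnvironmentFiles above and gyp-win-tool's _GetEnv agree on: key=value pairs separated by NULs plus a final NUL terminator, the layout CreateProcess expects. The file name and values below are illustrative only.

def write_env_block(path, env):
  # Mirrors _FormatAsEnvironmentBlock: key=value NUL ... plus one final NUL.
  block = ''.join('%s=%s\0' % (k, v) for k, v in env.items()) + '\0'
  open(path, 'wb').write(block)

def read_env_block(path):
  # Mirrors win_tool.py's _GetEnv: drop the two trailing NULs, split the pairs.
  pairs = open(path, 'rb').read()[:-2].split('\0')
  return dict(item.split('=', 1) for item in pairs)

write_env_block('environment.x86', {'PATH': r'C:\tools', 'TEMP': r'C:\tmp'})
assert read_env_block('environment.x86')['TEMP'] == r'C:\tmp'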
153
google-breakpad/src/tools/gyp/pylib/gyp/ninja_syntax.py
Normal file
153
google-breakpad/src/tools/gyp/pylib/gyp/ninja_syntax.py
Normal file
@ -0,0 +1,153 @@
# This file comes from
#   https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.

"""Python module for generating .ninja files.

Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""

import textwrap
import re

def escape_spaces(word):
    return word.replace('$ ','$$ ').replace(' ','$ ')

class Writer(object):
    def __init__(self, output, width=78):
        self.output = output
        self.width = width

    def newline(self):
        self.output.write('\n')

    def comment(self, text):
        for line in textwrap.wrap(text, self.width - 2):
            self.output.write('# ' + line + '\n')

    def variable(self, key, value, indent=0):
        if value is None:
            return
        if isinstance(value, list):
            value = ' '.join(filter(None, value))  # Filter out empty strings.
        self._line('%s = %s' % (key, value), indent)

    def rule(self, name, command, description=None, depfile=None,
             generator=False, restat=False, rspfile=None,
             rspfile_content=None):
        self._line('rule %s' % name)
        self.variable('command', command, indent=1)
        if description:
            self.variable('description', description, indent=1)
        if depfile:
            self.variable('depfile', depfile, indent=1)
        if generator:
            self.variable('generator', '1', indent=1)
        if restat:
            self.variable('restat', '1', indent=1)
        if rspfile:
            self.variable('rspfile', rspfile, indent=1)
        if rspfile_content:
            self.variable('rspfile_content', rspfile_content, indent=1)

    def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
              variables=None):
        outputs = self._as_list(outputs)
        all_inputs = self._as_list(inputs)[:]
        out_outputs = list(map(escape_spaces, outputs))
        all_inputs = list(map(escape_spaces, all_inputs))

        if implicit:
            implicit = map(escape_spaces, self._as_list(implicit))
            all_inputs.append('|')
            all_inputs.extend(implicit)
        if order_only:
            order_only = map(escape_spaces, self._as_list(order_only))
            all_inputs.append('||')
            all_inputs.extend(order_only)

        self._line('build %s: %s %s' % (' '.join(out_outputs),
                                        rule,
                                        ' '.join(all_inputs)))

        if variables:
            if isinstance(variables, dict):
                iterator = variables.iteritems()
            else:
                iterator = iter(variables)

            for key, val in iterator:
                self.variable(key, val, indent=1)

        return outputs

    def include(self, path):
        self._line('include %s' % path)

    def subninja(self, path):
        self._line('subninja %s' % path)

    def default(self, paths):
        self._line('default %s' % ' '.join(self._as_list(paths)))

    def _count_dollars_before_index(self, s, i):
        """Returns the number of '$' characters right in front of s[i]."""
        dollar_count = 0
        dollar_index = i - 1
        while dollar_index > 0 and s[dollar_index] == '$':
            dollar_count += 1
            dollar_index -= 1
        return dollar_count

    def _line(self, text, indent=0):
        """Write 'text' word-wrapped at self.width characters."""
        leading_space = ' ' * indent
        while len(leading_space) + len(text) > self.width:
            # The text is too wide; wrap if possible.

            # Find the rightmost space that would obey our width constraint and
            # that's not an escaped space.
            available_space = self.width - len(leading_space) - len(' $')
            space = available_space
            while True:
                space = text.rfind(' ', 0, space)
                if space < 0 or \
                   self._count_dollars_before_index(text, space) % 2 == 0:
                    break

            if space < 0:
                # No such space; just use the first unescaped space we can find.
                space = available_space - 1
                while True:
                    space = text.find(' ', space + 1)
                    if space < 0 or \
                       self._count_dollars_before_index(text, space) % 2 == 0:
                        break
                if space < 0:
                    # Give up on breaking.
                    break

            self.output.write(leading_space + text[0:space] + ' $\n')
            text = text[space+1:]

            # Subsequent lines are continuations, so indent them.
            leading_space = ' ' * (indent+2)

        self.output.write(leading_space + text + '\n')

    def _as_list(self, input):
        if input is None:
            return []
        if isinstance(input, list):
            return input
        return [input]


def escape(string):
    """Escape a string such that it can be embedded into a Ninja file without
    further interpretation."""
    assert '\n' not in string, 'Ninja syntax does not allow newlines'
    # We only have one special metacharacter: '$'.
    return string.replace('$', '$$')
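For orientation, a small usage sketch of the Writer class above; the file names and compile command are made up, and it assumes the module is importable as ninja_syntax.

import StringIO
from ninja_syntax import Writer

buf = StringIO.StringIO()
n = Writer(buf)
n.comment('Toy build statements emitted by a generator.')
n.rule('cc', command='cl /nologo /c $in /Fo$out', description='CC $out')
n.build('foo.obj', 'cc', inputs='foo.c', variables={'extra_flags': '/W4'})
n.default('foo.obj')
print buf.getvalue()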
51
google-breakpad/src/tools/gyp/pylib/gyp/sun_tool.py
Normal file
51
google-breakpad/src/tools/gyp/pylib/gyp/sun_tool.py
Normal file
@ -0,0 +1,51 @@
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""These functions are executed via gyp-sun-tool when using the Makefile
generator."""

import fcntl
import os
import struct
import subprocess
import sys


def main(args):
  executor = SunTool()
  executor.Dispatch(args)


class SunTool(object):
  """This class performs all the SunOS tooling steps. The methods can either be
  executed directly, or dispatched from an argument list."""

  def Dispatch(self, args):
    """Dispatches a string command to a method."""
    if len(args) < 1:
      raise Exception("Not enough arguments")

    method = "Exec%s" % self._CommandifyName(args[0])
    getattr(self, method)(*args[1:])

  def _CommandifyName(self, name_string):
    """Transforms a tool name like copy-info-plist to CopyInfoPlist"""
    return name_string.title().replace('-', '')

  def ExecFlock(self, lockfile, *cmd_list):
    """Emulates the most basic behavior of Linux's flock(1)."""
    # Rely on exception handling to report errors.
    # Note that the stock python on SunOS has a bug
    # where fcntl.flock(fd, LOCK_EX) always fails
    # with EBADF, that's why we use this F_SETLK
    # hack instead.
    fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666)
    op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
    fcntl.fcntl(fd, fcntl.F_SETLK, op)
    return subprocess.call(cmd_list)


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
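Like gyp-win-tool below, this tool dispatches on its first argument: _CommandifyName turns flock into ExecFlock. An illustrative call, with a made-up lock file and command:

# Equivalent to running: gyp-sun-tool flock /tmp/link.lock gcc -o prog main.o
SunTool().Dispatch(['flock', '/tmp/link.lock', 'gcc', '-o', 'prog', 'main.o'])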
68
google-breakpad/src/tools/gyp/pylib/gyp/system_test.py
Normal file
68
google-breakpad/src/tools/gyp/pylib/gyp/system_test.py
Normal file
@ -0,0 +1,68 @@
#!/usr/bin/env python

# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os
import sys  # Needed for sys.exit() in the __main__ block below.
import tempfile
import shutil
import subprocess


def TestCommands(commands, files={}, env={}):
  """Run commands in a temporary directory, returning true if they all succeed.
  Return false on failures or if any commands produce output.

  Arguments:
    commands: an array of shell-interpretable commands, e.g. ['ls -l', 'pwd']
              each will be expanded with Python %-expansion using env first.
    files: a dictionary mapping filename to contents;
           files will be created in the temporary directory before running
           the command.
    env: a dictionary of strings to expand commands with.
  """
  tempdir = tempfile.mkdtemp()
  try:
    for name, contents in files.items():
      f = open(os.path.join(tempdir, name), 'wb')
      f.write(contents)
      f.close()
    for command in commands:
      proc = subprocess.Popen(command % env, shell=True,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT,
                              cwd=tempdir)
      output = proc.communicate()[0]
      if proc.returncode != 0 or output:
        return False
    return True
  finally:
    shutil.rmtree(tempdir)
  return False


def TestArSupportsT(ar_command='ar', cc_command='cc'):
  """Test whether 'ar' supports the 'T' flag."""
  return TestCommands(['%(cc)s -c test.c',
                       '%(ar)s crsT test.a test.o',
                       '%(cc)s test.a'],
                      files={'test.c': 'int main(){}'},
                      env={'ar': ar_command, 'cc': cc_command})


def main():
  # Run the various test functions and print the results.
  def RunTest(description, function, **kwargs):
    print "Testing " + description + ':',
    if function(**kwargs):
      print 'ok'
    else:
      print 'fail'
  RunTest("ar 'T' flag", TestArSupportsT)
  RunTest("ar 'T' flag with ccache", TestArSupportsT, cc_command='ccache cc')
  return 0


if __name__ == '__main__':
  sys.exit(main())
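A hypothetical caller of the helper above, e.g. a generator deciding whether thin archives can be used:

if TestArSupportsT(ar_command='ar', cc_command='gcc'):
  print 'ar supports thin archives (T flag)'
else:
  print 'falling back to regular archives'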
194
google-breakpad/src/tools/gyp/pylib/gyp/win_tool.py
Normal file
194
google-breakpad/src/tools/gyp/pylib/gyp/win_tool.py
Normal file
@ -0,0 +1,194 @@
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Utility functions for Windows builds.

These functions are executed via gyp-win-tool when using the ninja generator.
"""

import os
import shutil
import subprocess
import sys
import win32con
import win32file
import pywintypes


def main(args):
  executor = WinTool()
  exit_code = executor.Dispatch(args)
  if exit_code is not None:
    sys.exit(exit_code)


class LinkLock(object):
  """A flock-style lock to limit the number of concurrent links to one. Based on
  http://code.activestate.com/recipes/65203-portalocker-cross-platform-posixnt-api-for-flock-s/
  """
  def __enter__(self):
    self.file = open('LinkLock', 'w+')
    self.file_handle = win32file._get_osfhandle(self.file.fileno())
    win32file.LockFileEx(self.file_handle, win32con.LOCKFILE_EXCLUSIVE_LOCK,
                         0, -0x10000, pywintypes.OVERLAPPED())

  def __exit__(self, type, value, traceback):
    win32file.UnlockFileEx(
        self.file_handle, 0, -0x10000, pywintypes.OVERLAPPED())
    self.file.close()


class WinTool(object):
  """This class performs all the Windows tooling steps. The methods can either
  be executed directly, or dispatched from an argument list."""

  def Dispatch(self, args):
    """Dispatches a string command to a method."""
    if len(args) < 1:
      raise Exception("Not enough arguments")

    method = "Exec%s" % self._CommandifyName(args[0])
    return getattr(self, method)(*args[1:])

  def _CommandifyName(self, name_string):
    """Transforms a tool name like recursive-mirror to RecursiveMirror."""
    return name_string.title().replace('-', '')

  def _GetEnv(self, arch):
    """Gets the saved environment from a file for a given architecture."""
    # The environment is saved as an "environment block" (see CreateProcess
    # and msvs_emulation for details). We convert to a dict here.
    # Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
    pairs = open(arch).read()[:-2].split('\0')
    kvs = [item.split('=', 1) for item in pairs]
    return dict(kvs)

  def ExecStamp(self, path):
    """Simple stamp command."""
    open(path, 'w').close()

  def ExecRecursiveMirror(self, source, dest):
    """Emulation of rm -rf out && cp -af in out."""
    if os.path.exists(dest):
      if os.path.isdir(dest):
        shutil.rmtree(dest)
      else:
        os.unlink(dest)
    if os.path.isdir(source):
      shutil.copytree(source, dest)
    else:
      shutil.copy2(source, dest)

  def ExecLinkWrapper(self, arch, *args):
    """Filter diagnostic output from link that looks like:
    ' Creating library ui.dll.lib and object ui.dll.exp'
    This happens when there are exports from the dll or exe.
    """
    with LinkLock():
      env = self._GetEnv(arch)
      popen = subprocess.Popen(args, shell=True, env=env,
                               stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
      out, _ = popen.communicate()
      for line in out.splitlines():
        if not line.startswith(' Creating library '):
          print line
      return popen.returncode

  def ExecManifestWrapper(self, arch, *args):
    """Run manifest tool with environment set. Strip out undesirable warning
    (some XML blocks are recognized by the OS loader, but not the manifest
    tool)."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if line and 'manifest authoring warning 81010002' not in line:
        print line
    return popen.returncode

  def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
                      *flags):
    """Filter noisy filenames output from MIDL compile step that isn't
    quietable via command line flags.
    """
    args = ['midl', '/nologo'] + list(flags) + [
        '/out', outdir,
        '/tlb', tlb,
        '/h', h,
        '/dlldata', dlldata,
        '/iid', iid,
        '/proxy', proxy,
        idl]
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    # Filter junk out of stdout, and write filtered versions. Output we want
    # to filter is pairs of lines that look like this:
    # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
    # objidl.idl
    lines = out.splitlines()
    prefix = 'Processing '
    processing = set(os.path.basename(x) for x in lines if x.startswith(prefix))
    for line in lines:
      if not line.startswith(prefix) and line not in processing:
        print line
    return popen.returncode

  def ExecAsmWrapper(self, arch, *args):
    """Filter logo banner from invocations of asm.exe."""
    env = self._GetEnv(arch)
    # MSVS doesn't assemble x64 asm files.
    if arch == 'environment.x64':
      return 0
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if (not line.startswith('Copyright (C) Microsoft Corporation') and
          not line.startswith('Microsoft (R) Macro Assembler') and
          not line.startswith(' Assembling: ') and
          line):
        print line
    return popen.returncode

  def ExecRcWrapper(self, arch, *args):
    """Filter logo banner from invocations of rc.exe. Older versions of RC
    don't support the /nologo flag."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
          not line.startswith('Copyright (C) Microsoft Corporation') and
          line):
        print line
    return popen.returncode

  def ExecClWrapper(self, arch, depname, *args):
    """Runs cl.exe and filters output through ninja-deplist-helper to get
    dependency information which is stored in |depname|."""
    env = self._GetEnv(arch)
    args = ' '.join(args) + \
           '| ninja-deplist-helper -r . -q -f cl -o ' + depname + '"'
    popen = subprocess.Popen(args, shell=True, env=env)
    popen.wait()
    return popen.returncode

  def ExecActionWrapper(self, arch, rspfile, *dir):
    """Runs an action command line from a response file using the environment
    for |arch|. If |dir| is supplied, use that as the working directory."""
    env = self._GetEnv(arch)
    args = open(rspfile).read()
    dir = dir[0] if dir else None
    popen = subprocess.Popen(args, shell=True, env=env, cwd=dir)
    popen.wait()
    return popen.returncode

if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
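An illustrative dispatch through WinTool (the arguments are made up): recursive-mirror resolves to ExecRecursiveMirror via _CommandifyName, and the *Wrapper commands read their environment from the environment.x86/environment.x64 files written by msvs_emulation.py above.

tool = WinTool()
# Same as: gyp-win-tool recursive-mirror resources out\Default\resources
tool.Dispatch(['recursive-mirror', 'resources', 'out\\Default\\resources'])
# Same as: gyp-win-tool stamp out\Default\obj\foo.stamp
tool.Dispatch(['stamp', 'out\\Default\\obj\\foo.stamp'])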
1044
google-breakpad/src/tools/gyp/pylib/gyp/xcode_emulation.py
Normal file
1044
google-breakpad/src/tools/gyp/pylib/gyp/xcode_emulation.py
Normal file
File diff suppressed because it is too large
2834
google-breakpad/src/tools/gyp/pylib/gyp/xcodeproj_file.py
Normal file
2834
google-breakpad/src/tools/gyp/pylib/gyp/xcodeproj_file.py
Normal file
File diff suppressed because it is too large
69
google-breakpad/src/tools/gyp/pylib/gyp/xml_fix.py
Normal file
69
google-breakpad/src/tools/gyp/pylib/gyp/xml_fix.py
Normal file
@ -0,0 +1,69 @@
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Applies a fix to CR LF TAB handling in xml.dom.

Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""


import xml.dom.minidom


def _Replacement_write_data(writer, data, is_attrib=False):
  """Writes datachars to writer."""
  data = data.replace("&", "&amp;").replace("<", "&lt;")
  data = data.replace("\"", "&quot;").replace(">", "&gt;")
  if is_attrib:
    data = data.replace(
        "\r", "&#xD;").replace(
        "\n", "&#xA;").replace(
        "\t", "&#x9;")
  writer.write(data)


def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
  # indent = current indentation
  # addindent = indentation to add to higher levels
  # newl = newline string
  writer.write(indent+"<" + self.tagName)

  attrs = self._get_attributes()
  a_names = attrs.keys()
  a_names.sort()

  for a_name in a_names:
    writer.write(" %s=\"" % a_name)
    _Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
    writer.write("\"")
  if self.childNodes:
    writer.write(">%s" % newl)
    for node in self.childNodes:
      node.writexml(writer, indent + addindent, addindent, newl)
    writer.write("%s</%s>%s" % (indent, self.tagName, newl))
  else:
    writer.write("/>%s" % newl)


class XmlFix(object):
  """Object to manage temporary patching of xml.dom.minidom."""

  def __init__(self):
    # Preserve current xml.dom.minidom functions.
    self.write_data = xml.dom.minidom._write_data
    self.writexml = xml.dom.minidom.Element.writexml
    # Inject replacement versions of a function and a method.
    xml.dom.minidom._write_data = _Replacement_write_data
    xml.dom.minidom.Element.writexml = _Replacement_writexml

  def Cleanup(self):
    if self.write_data:
      xml.dom.minidom._write_data = self.write_data
      xml.dom.minidom.Element.writexml = self.writexml
      self.write_data = None

  def __del__(self):
    self.Cleanup()
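A short usage sketch of XmlFix; the output file name is made up. While the patch is active, CR, LF and TAB characters in attribute values are written as numeric character references instead of being mangled by minidom.

import xml.dom.minidom

fix = XmlFix()
doc = xml.dom.minidom.parseString('<Tool Name="VCCLCompilerTool"/>')
doc.documentElement.setAttribute('AdditionalOptions', '/DFOO\n/DBAR')
doc.documentElement.writexml(open('fixed_output.xml', 'w'))
fix.Cleanup()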
307
google-breakpad/src/tools/gyp/pylintrc
Normal file
307
google-breakpad/src/tools/gyp/pylintrc
Normal file
@ -0,0 +1,307 @@
|
||||
[MASTER]
|
||||
|
||||
# Specify a configuration file.
|
||||
#rcfile=
|
||||
|
||||
# Python code to execute, usually for sys.path manipulation such as
|
||||
# pygtk.require().
|
||||
#init-hook=
|
||||
|
||||
# Profiled execution.
|
||||
profile=no
|
||||
|
||||
# Add files or directories to the blacklist. They should be base names, not
|
||||
# paths.
|
||||
ignore=CVS
|
||||
|
||||
# Pickle collected data for later comparisons.
|
||||
persistent=yes
|
||||
|
||||
# List of plugins (as comma separated values of python modules names) to load,
|
||||
# usually to register additional checkers.
|
||||
load-plugins=
|
||||
|
||||
|
||||
[MESSAGES CONTROL]
|
||||
|
||||
# Enable the message, report, category or checker with the given id(s). You can
|
||||
# either give multiple identifier separated by comma (,) or put this option
|
||||
# multiple time.
|
||||
#enable=
|
||||
|
||||
# Disable the message, report, category or checker with the given id(s). You
|
||||
# can either give multiple identifier separated by comma (,) or put this option
|
||||
# multiple time (only on the command line, not in the configuration file where
|
||||
# it should appear only once).
|
||||
# C0103: Invalid name "NN" (should match [a-z_][a-z0-9_]{2,30}$)
|
||||
# C0111: Missing docstring
|
||||
# C0302: Too many lines in module (NN)
|
||||
# R0902: Too many instance attributes (N/7)
|
||||
# R0903: Too few public methods (N/2)
|
||||
# R0904: Too many public methods (NN/20)
|
||||
# R0912: Too many branches (NN/12)
|
||||
# R0913: Too many arguments (N/5)
|
||||
# R0914: Too many local variables (NN/15)
|
||||
# R0915: Too many statements (NN/50)
|
||||
# W0141: Used builtin function 'map'
|
||||
# W0142: Used * or ** magic
|
||||
# W0232: Class has no __init__ method
|
||||
# W0511: TODO
|
||||
# W0603: Using the global statement
|
||||
#
|
||||
# These should be enabled eventually:
|
||||
# C0112: Empty docstring
|
||||
# C0301: Line too long (NN/80)
|
||||
# C0321: More than one statement on single line
|
||||
# C0322: Operator not preceded by a space
|
||||
# C0323: Operator not followed by a space
|
||||
# C0324: Comma not followed by a space
|
||||
# E0101: Explicit return in __init__
|
||||
# E0102: function already defined line NN
|
||||
# E1002: Use of super on an old style class
|
||||
# E1101: Instance of 'XX' has no 'YY' member
|
||||
# E1103: Instance of 'XX' has no 'XX' member (but some types could not be inferred)
|
||||
# E0602: Undefined variable 'XX'
|
||||
# F0401: Unable to import 'XX'
|
||||
# R0201: Method could be a function
|
||||
# R0801: Similar lines in N files
|
||||
# W0102: Dangerous default value {} as argument
|
||||
# W0104: Statement seems to have no effect
|
||||
# W0105: String statement has no effect
|
||||
# W0108: Lambda may not be necessary
|
||||
# W0201: Attribute 'XX' defined outside __init__
|
||||
# W0212: Access to a protected member XX of a client class
|
||||
# W0221: Arguments number differs from overridden method
|
||||
# W0223: Method 'XX' is abstract in class 'YY' but is not overridden
|
||||
# W0231: __init__ method from base class 'XX' is not called
|
||||
# W0301: Unnecessary semicolon
|
||||
# W0311: Bad indentation. Found NN spaces, expected NN
|
||||
# W0401: Wildcard import XX
|
||||
# W0402: Uses of a deprecated module 'string'
|
||||
# W0403: Relative import 'XX', should be 'YY.XX'
|
||||
# W0404: Reimport 'XX' (imported line NN)
|
||||
# W0601: Global variable 'XX' undefined at the module level
|
||||
# W0602: Using global for 'XX' but no assignment is done
|
||||
# W0611: Unused import pprint
|
||||
# W0612: Unused variable 'XX'
|
||||
# W0613: Unused argument 'XX'
|
||||
# W0614: Unused import XX from wildcard import
|
||||
# W0621: Redefining name 'XX' from outer scope (line NN)
|
||||
# W0622: Redefining built-in 'NN'
|
||||
# W0631: Using possibly undefined loop variable 'XX'
|
||||
# W0701: Raising a string exception
|
||||
# W0702: No exception type(s) specified
|
||||
disable=C0103,C0111,C0302,R0902,R0903,R0904,R0912,R0913,R0914,R0915,W0141,W0142,W0232,W0511,W0603,C0112,C0301,C0321,C0322,C0323,C0324,E0101,E0102,E1002,E1101,E1103,E0602,F0401,R0201,R0801,W0102,W0104,W0105,W0108,W0201,W0212,W0221,W0223,W0231,W0301,W0311,W0401,W0402,W0403,W0404,W0601,W0602,W0611,W0612,W0613,W0614,W0621,W0622,W0631,W0701,W0702
|
||||
|
||||
|
||||
[REPORTS]
|
||||
|
||||
# Set the output format. Available formats are text, parseable, colorized, msvs
|
||||
# (visual studio) and html
|
||||
output-format=text
|
||||
|
||||
# Include message's id in output
|
||||
include-ids=yes
|
||||
|
||||
# Put messages in a separate file for each module / package specified on the
|
||||
# command line instead of printing them on stdout. Reports (if any) will be
|
||||
# written in a file name "pylint_global.[txt|html]".
|
||||
files-output=no
|
||||
|
||||
# Tells whether to display a full report or only the messages
|
||||
reports=no
|
||||
|
||||
# Python expression which should return a note less than 10 (10 is the highest
|
||||
# note). You have access to the variables errors warning, statement which
|
||||
# respectively contain the number of errors / warnings messages and the total
|
||||
# number of statements analyzed. This is used by the global evaluation report
|
||||
# (RP0004).
|
||||
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
|
||||
|
||||
# Add a comment according to your evaluation note. This is used by the global
|
||||
# evaluation report (RP0004).
|
||||
comment=no
|
||||
|
||||
|
||||
[VARIABLES]
|
||||
|
||||
# Tells whether we should check for unused import in __init__ files.
|
||||
init-import=no
|
||||
|
||||
# A regular expression matching the beginning of the name of dummy variables
|
||||
# (i.e. not used).
|
||||
dummy-variables-rgx=_|dummy
|
||||
|
||||
# List of additional names supposed to be defined in builtins. Remember that
|
||||
# you should avoid to define new builtins when possible.
|
||||
additional-builtins=
|
||||
|
||||
|
||||
[TYPECHECK]
|
||||
|
||||
# Tells whether missing members accessed in mixin class should be ignored. A
|
||||
# mixin class is detected if its name ends with "mixin" (case insensitive).
|
||||
ignore-mixin-members=yes
|
||||
|
||||
# List of classes names for which member attributes should not be checked
|
||||
# (useful for classes with attributes dynamically set).
|
||||
ignored-classes=SQLObject
|
||||
|
||||
# When zope mode is activated, add a predefined set of Zope acquired attributes
|
||||
# to generated-members.
|
||||
zope=no
|
||||
|
||||
# List of members which are set dynamically and missed by pylint inference
|
||||
# system, and so shouldn't trigger E0201 when accessed. Python regular
|
||||
# expressions are accepted.
|
||||
generated-members=REQUEST,acl_users,aq_parent
|
||||
|
||||
|
||||
[MISCELLANEOUS]
|
||||
|
||||
# List of note tags to take in consideration, separated by a comma.
|
||||
notes=FIXME,XXX,TODO
|
||||
|
||||
|
||||
[SIMILARITIES]
|
||||
|
||||
# Minimum lines number of a similarity.
|
||||
min-similarity-lines=4
|
||||
|
||||
# Ignore comments when computing similarities.
|
||||
ignore-comments=yes
|
||||
|
||||
# Ignore docstrings when computing similarities.
|
||||
ignore-docstrings=yes
|
||||
|
||||
|
||||
[FORMAT]
|
||||
|
||||
# Maximum number of characters on a single line.
|
||||
max-line-length=80
|
||||
|
||||
# Maximum number of lines in a module
|
||||
max-module-lines=1000
|
||||
|
||||
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
|
||||
# tab).
|
||||
indent-string=' '
|
||||
|
||||
|
||||
[BASIC]
|
||||
|
||||
# Required attributes for module, separated by a comma
|
||||
required-attributes=
|
||||
|
||||
# List of builtins function names that should not be used, separated by a comma
|
||||
bad-functions=map,filter,apply,input
|
||||
|
||||
# Regular expression which should only match correct module names
|
||||
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
|
||||
|
||||
# Regular expression which should only match correct module level names
|
||||
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
|
||||
|
||||
# Regular expression which should only match correct class names
|
||||
class-rgx=[A-Z_][a-zA-Z0-9]+$
|
||||
|
||||
# Regular expression which should only match correct function names
|
||||
function-rgx=[a-z_][a-z0-9_]{2,30}$
|
||||
|
||||
# Regular expression which should only match correct method names
|
||||
method-rgx=[a-z_][a-z0-9_]{2,30}$
|
||||
|
||||
# Regular expression which should only match correct instance attribute names
|
||||
attr-rgx=[a-z_][a-z0-9_]{2,30}$
|
||||
|
||||
# Regular expression which should only match correct argument names
|
||||
argument-rgx=[a-z_][a-z0-9_]{2,30}$
|
||||
|
||||
# Regular expression which should only match correct variable names
|
||||
variable-rgx=[a-z_][a-z0-9_]{2,30}$
|
||||
|
||||
# Regular expression which should only match correct list comprehension /
|
||||
# generator expression variable names
|
||||
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
|
||||
|
||||
# Good variable names which should always be accepted, separated by a comma
|
||||
good-names=i,j,k,ex,Run,_
|
||||
|
||||
# Bad variable names which should always be refused, separated by a comma
|
||||
bad-names=foo,bar,baz,toto,tutu,tata
|
||||
|
||||
# Regular expression which should only match functions or classes name which do
|
||||
# not require a docstring
|
||||
no-docstring-rgx=__.*__
|
||||
|
||||
|
||||
[DESIGN]
|
||||
|
||||
# Maximum number of arguments for function / method
|
||||
max-args=5
|
||||
|
||||
# Argument names that match this expression will be ignored. Default to name
|
||||
# with leading underscore
|
||||
ignored-argument-names=_.*
|
||||
|
||||
# Maximum number of locals for function / method body
|
||||
max-locals=15
|
||||
|
||||
# Maximum number of return / yield for function / method body
|
||||
max-returns=6
|
||||
|
||||
# Maximum number of branch for function / method body
|
||||
max-branchs=12
|
||||
|
||||
# Maximum number of statements in function / method body
|
||||
max-statements=50
|
||||
|
||||
# Maximum number of parents for a class (see R0901).
|
||||
max-parents=7
|
||||
|
||||
# Maximum number of attributes for a class (see R0902).
|
||||
max-attributes=7
|
||||
|
||||
# Minimum number of public methods for a class (see R0903).
|
||||
min-public-methods=2
|
||||
|
||||
# Maximum number of public methods for a class (see R0904).
|
||||
max-public-methods=20
|
||||
|
||||
|
||||
[CLASSES]
|
||||
|
||||
# List of interface methods to ignore, separated by a comma. This is used for
|
||||
# instance to not check methods defines in Zope's Interface base class.
|
||||
ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
|
||||
|
||||
# List of method names used to declare (i.e. assign) instance attributes.
|
||||
defining-attr-methods=__init__,__new__,setUp
|
||||
|
||||
# List of valid names for the first argument in a class method.
|
||||
valid-classmethod-first-arg=cls
|
||||
|
||||
|
||||
[IMPORTS]
|
||||
|
||||
# Deprecated modules which should not be used, separated by a comma
|
||||
deprecated-modules=regsub,string,TERMIOS,Bastion,rexec
|
||||
|
||||
# Create a graph of every (i.e. internal and external) dependencies in the
|
||||
# given file (report RP0402 must not be disabled)
|
||||
import-graph=
|
||||
|
||||
# Create a graph of external dependencies in the given file (report RP0402 must
|
||||
# not be disabled)
|
||||
ext-import-graph=
|
||||
|
||||
# Create a graph of internal dependencies in the given file (report RP0402 must
|
||||
# not be disabled)
|
||||
int-import-graph=
|
||||
|
||||
|
||||
[EXCEPTIONS]
|
||||
|
||||
# Exceptions that will emit a warning when being caught. Defaults to
|
||||
# "Exception"
|
||||
overgeneral-exceptions=Exception
|
26
google-breakpad/src/tools/gyp/setup.py
Normal file
26
google-breakpad/src/tools/gyp/setup.py
Normal file
@ -0,0 +1,26 @@
#!/usr/bin/env python

# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from distutils.core import setup
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.command.install_scripts import install_scripts

setup(
  name='gyp',
  version='0.1',
  description='Generate Your Projects',
  author='Chromium Authors',
  author_email='chromium-dev@googlegroups.com',
  url='http://code.google.com/p/gyp',
  package_dir = {'': 'pylib'},
  packages=['gyp', 'gyp.generator'],

  scripts = ['gyp'],
  cmdclass = {'install': install,
              'install_lib': install_lib,
              'install_scripts': install_scripts},
)