libc: Update auto-gen scripts
Make the scripts use external/kernel-headers/original by default.
clean_header.py: Document -k<path>, add -d<path>
find_headers.py: Make kernel config files optional
update_all.py: Allow setting the path to kernel headers on the command-line
update_all.py: Better formatting of output on ttys
update_all.py: Automatically perform "git add/rm" on affected files.
SYSCALLS.TXT: Fix typo in __socketcall definition.
checksyscalls.py: Add support for SuperH architecture in the checks.
gensyscalls.py: Automatically perform "git add/rm" on affected files.
cpp.py: Fixed a bug that prevented certain type definitions from
being kept in the generated clean header (e.g.
struct ethtool_drvinfo in <linux/ethtool.h>)
All scripts will use the content of external/kernel-headers/original by default now.
The generated code removes all empty lines and trailing whitespace. This is useful
to ensure a unified output even if we change the parser again in the future.
The top-level disclaimer has been edited with update instructions to regenerate
the headers when needed.
Also, a warning is now inserted every 8th line in the final output:
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
Changes under kernel/arch-arm and kernel/arch-x86 should correspond to whitespace
differences and additional struct definitions that were missed by the previous
parser implementation.
Change-Id: Icd1c056bacd766759f3e9b7bb5d63a246f3d656a
WARNING: If you run these scripts, do not submit the result to gerrit for now.
It seems there are discrepancies between the content of original headers
and those currently committed under bionic/libc/kernel/.
(This problem is the main motivation to insert the warning repeatedly).
Current list of issues:
- Missing SuperH headers (i.e. external/kernel-headers/original/asm-sh)
diff --git a/libc/kernel/tools/clean_header.py b/libc/kernel/tools/clean_header.py
index dad9120..94b19ce 100755
--- a/libc/kernel/tools/clean_header.py
+++ b/libc/kernel/tools/clean_header.py
@@ -7,12 +7,12 @@
noUpdate = 1
-def cleanupFile( path ):
+def cleanupFile( path, original_path=kernel_original_path ):
"""reads an original header and perform the cleanup operation on it
this functions returns the destination path and the clean header
as a single string"""
# check the header path
- src_path = path
+ src_path = path
if not os.path.exists(src_path):
if noUpdate:
@@ -26,7 +26,6 @@
sys.stderr.write( "warning: not a file: %s\n" % path )
return None, None
- original_path = kernel_original_path
if os.path.commonprefix( [ src_path, original_path ] ) != original_path:
if noUpdate:
panic( "file is not in 'original' directory: %s\n" % path );
@@ -54,27 +53,27 @@
else:
dst_path = "common/" + src_path
- dst_path = os.path.normpath( original_path + "/../" + dst_path )
+ dst_path = os.path.normpath( kernel_cleaned_path + "/" + dst_path )
# now, let's parse the file
#
- list = cpp.BlockParser().parseFile(path)
- if not list:
+ blocks = cpp.BlockParser().parseFile(path)
+ if not blocks:
sys.stderr.write( "error: can't parse '%s'" % path )
sys.exit(1)
- list.optimizeMacros( kernel_known_macros )
- list.optimizeIf01()
- list.removeVarsAndFuncs( statics )
- list.removeComments()
- list.removeEmptyLines()
- list.removeMacroDefines( kernel_ignored_macros )
- list.insertDisclaimer( kernel.kernel_disclaimer )
- list.replaceTokens( kernel_token_replacements )
+ blocks.optimizeMacros( kernel_known_macros )
+ blocks.optimizeIf01()
+ blocks.removeVarsAndFuncs( statics )
+ blocks.replaceTokens( kernel_token_replacements )
+ blocks.removeComments()
+ blocks.removeMacroDefines( kernel_ignored_macros )
+ blocks.removeWhiteSpace()
out = StringOutput()
- list.write(out)
+ out.write( kernel_disclaimer )
+ blocks.writeWithWarning(out, kernel_warning, 4)
return dst_path, out.get()
@@ -92,12 +91,15 @@
if the content has changed. with this, you can pass more
than one file on the command-line
+ -k<path> specify path of original kernel headers
+ -d<path> specify path of cleaned kernel headers
+
<header_path> must be in a subdirectory of 'original'
""" % os.path.basename(sys.argv[0])
sys.exit(1)
try:
- optlist, args = getopt.getopt( sys.argv[1:], 'uvk:' )
+ optlist, args = getopt.getopt( sys.argv[1:], 'uvk:d:' )
except:
# unrecognized option
sys.stderr.write( "error: unrecognized option\n" )
@@ -111,6 +113,8 @@
D_setlevel(1)
elif opt == '-k':
kernel_original_path = arg
+ elif opt == '-d':
+ kernel_cleaned_path = arg
if len(args) == 0:
usage()
@@ -143,9 +147,6 @@
print "cleaning: %-*s -> %-*s (%s)" % ( 35, path, 35, dst_path, r )
- if os.environ.has_key("ANDROID_PRODUCT_OUT"):
- b.updateP4Files()
- else:
- b.updateFiles()
+ b.updateGitFiles()
sys.exit(0)
diff --git a/libc/kernel/tools/cpp.py b/libc/kernel/tools/cpp.py
index 8828a5d..8e15a67 100644
--- a/libc/kernel/tools/cpp.py
+++ b/libc/kernel/tools/cpp.py
@@ -1529,7 +1529,7 @@
class Block:
"""a class used to model a block of input source text. there are two block types:
- - direcive blocks: contain the tokens of a single pre-processor directive (e.g. #if)
+ - directive blocks: contain the tokens of a single pre-processor directive (e.g. #if)
- text blocks, contain the tokens of non-directive blocks
the cpp parser class below will transform an input source file into a list of Block
@@ -1609,6 +1609,91 @@
else:
return None
+ def removeWhiteSpace(self):
+ # Remove trailing whitespace and empty lines
+ # All whitespace is also contracted to a single space
+ if self.directive != None:
+ return
+
+ tokens = []
+ line = 0 # index of line start
+ space = -1 # index of first space, or -1
+ ii = 0
+ nn = len(self.tokens)
+ while ii < nn:
+ tok = self.tokens[ii]
+
+            # If we find a space, record its position if it is the first
+            # one since the line start or since the previous token. Don't
+            # append anything to the tokens array yet though.
+ if tok.id == tokSPACE:
+ if space < 0:
+ space = ii
+ ii += 1
+ continue
+
+ # If this is a line space, ignore the spaces we found previously
+ # on the line, and remove empty lines.
+ if tok.id == tokLN:
+ old_line = line
+ old_space = space
+ #print "N line=%d space=%d ii=%d" % (line, space, ii)
+ ii += 1
+ line = ii
+ space = -1
+ if old_space == old_line: # line only contains spaces
+ #print "-s"
+ continue
+ if ii-1 == old_line: # line is empty
+ #print "-e"
+ continue
+ tokens.append(tok)
+ continue
+
+ # Other token, append any space range if any, converting each
+ # one to a single space character, then append the token.
+ if space >= 0:
+ jj = space
+ space = -1
+ while jj < ii:
+ tok2 = self.tokens[jj]
+ tok2.value = " "
+ tokens.append(tok2)
+ jj += 1
+
+ tokens.append(tok)
+ ii += 1
+
+ self.tokens = tokens
+
+ def writeWithWarning(self,out,warning,left_count,repeat_count):
+        # removeWhiteSpace() will sometimes create non-directive blocks
+        # without any tokens. These come from blocks that only contained
+        # empty lines and spaces. They should not be printed in the final
+        # output, and thus should not be counted for this operation.
+ #
+ if not self.directive and self.tokens == []:
+ return left_count
+
+ if self.directive:
+ out.write(str(self) + "\n")
+ left_count -= 1
+ if left_count == 0:
+ out.write(warning)
+ left_count = repeat_count
+
+ else:
+ for tok in self.tokens:
+ out.write(str(tok))
+ if tok.id == tokLN:
+ left_count -= 1
+ if left_count == 0:
+ out.write(warning)
+ left_count = repeat_count
+
+ return left_count
+
+
def __repr__(self):
"""generate the representation of a given block"""
if self.directive:
@@ -1651,7 +1736,6 @@
return result
-
class BlockList:
"""a convenience class used to hold and process a list of blocks returned by
the cpp parser"""
@@ -1694,6 +1778,10 @@
if b.isIf():
b.expr.removePrefixed(prefix,names)
+ def removeWhiteSpace(self):
+ for b in self.blocks:
+ b.removeWhiteSpace()
+
def optimizeAll(self,macros):
self.optimizeMacros(macros)
self.optimizeIf01()
@@ -1713,72 +1801,17 @@
def write(self,out):
out.write(str(self))
+ def writeWithWarning(self,out,warning,repeat_count):
+ left_count = repeat_count
+ for b in self.blocks:
+ left_count = b.writeWithWarning(out,warning,left_count,repeat_count)
+
def removeComments(self):
for b in self.blocks:
for tok in b.tokens:
if tok.id == tokSPACE:
tok.value = " "
- def removeEmptyLines(self):
- # state = 1 => previous line was tokLN
- # state = 0 => previous line was directive
- state = 1
- for b in self.blocks:
- if b.isDirective():
- #print "$$$ directive %s" % str(b)
- state = 0
- else:
- # a tokLN followed by spaces is replaced by a single tokLN
- # several successive tokLN are replaced by a single one
- #
- dst = []
- src = b.tokens
- n = len(src)
- i = 0
- #print "$$$ parsing %s" % repr(src)
- while i < n:
- # find final tokLN
- j = i
- while j < n and src[j].id != tokLN:
- j += 1
-
- if j >= n:
- # uhhh
- dst += src[i:]
- break
-
- if src[i].id == tokSPACE:
- k = i+1
- while src[k].id == tokSPACE:
- k += 1
-
- if k == j: # empty lines with spaces in it
- i = j # remove the spaces
-
- if i == j:
- # an empty line
- if state == 1:
- i += 1 # remove it
- else:
- state = 1
- dst.append(src[i])
- i += 1
- else:
- # this line is not empty, remove trailing spaces
- k = j
- while k > i and src[k-1].id == tokSPACE:
- k -= 1
-
- nn = i
- while nn < k:
- dst.append(src[nn])
- nn += 1
- dst.append(src[j])
- state = 0
- i = j+1
-
- b.tokens = dst
-
def removeVarsAndFuncs(self,knownStatics=set()):
"""remove all extern and static declarations corresponding
to variable and function declarations. we only accept typedefs
@@ -1789,66 +1822,118 @@
which is useful for optimized byteorder swap functions and
stuff like that.
"""
- # state = 1 => typedef/struct encountered
- # state = 2 => vars or func declaration encountered, skipping until ";"
# state = 0 => normal (i.e. LN + spaces)
+ # state = 1 => typedef/struct encountered, ends with ";"
+ # state = 2 => var declaration encountered, ends with ";"
+ # state = 3 => func declaration encountered, ends with "}"
state = 0
depth = 0
blocks2 = []
+ skipTokens = False
for b in self.blocks:
if b.isDirective():
blocks2.append(b)
else:
n = len(b.tokens)
i = 0
- first = 0
- if state == 2:
+ if skipTokens:
first = n
+ else:
+ first = 0
while i < n:
tok = b.tokens[i]
- if state == 0:
- bad = 0
- if tok.id in [tokLN, tokSPACE]:
- pass
- elif tok.value in [ 'struct', 'typedef', 'enum', 'union', '__extension__' ]:
- state = 1
- else:
- if tok.value in [ 'static', 'extern', '__KINLINE' ]:
- j = i+1
- ident = ""
- while j < n and not (b.tokens[j].id in [ '(', ';' ]):
- if b.tokens[j].id == tokIDENT:
- ident = b.tokens[j].value
- j += 1
- if j < n and ident in knownStatics:
- # this is a known static, we're going to keep its
- # definition in the final output
- state = 1
- else:
- #print "### skip static '%s'" % ident
- pass
-
- if state == 0:
- if i > first:
- #print "### intermediate from '%s': '%s'" % (tok.value, repr(b.tokens[first:i]))
- blocks2.append( Block(b.tokens[first:i]) )
- state = 2
- first = n
-
- else: # state > 0
- if tok.id == '{':
+ tokid = tok.id
+ # If we are not looking for the start of a new
+ # type/var/func, then skip over tokens until
+ # we find our terminator, managing the depth of
+ # accolades as we go.
+ if state > 0:
+ terminator = False
+ if tokid == '{':
depth += 1
-
- elif tok.id == '}':
+ elif tokid == '}':
if depth > 0:
depth -= 1
+ if (depth == 0) and (state == 3):
+ terminator = True
+ elif tokid == ';' and depth == 0:
+ terminator = True
- elif depth == 0 and tok.id == ';':
- if state == 2:
- first = i+1
+ if terminator:
+ # we found the terminator
state = 0
+ if skipTokens:
+ skipTokens = False
+ first = i+1
- i += 1
+ i = i+1
+ continue
+
+ # We are looking for the start of a new type/func/var
+ # ignore whitespace
+ if tokid in [tokLN, tokSPACE]:
+ i = i+1
+ continue
+
+ # Is it a new type definition, then start recording it
+ if tok.value in [ 'struct', 'typedef', 'enum', 'union', '__extension__' ]:
+ #print "$$$ keep type declr" + repr(b.tokens[i:])
+ state = 1
+ i = i+1
+ continue
+
+ # Is it a variable or function definition. If so, first
+ # try to determine which type it is, and also extract
+ # its name.
+ #
+ # We're going to parse the next tokens of the same block
+ # until we find a semi-column or a left parenthesis.
+ #
+ # The semi-column corresponds to a variable definition,
+ # the left-parenthesis to a function definition.
+ #
+ # We also assume that the var/func name is the last
+ # identifier before the terminator.
+ #
+ j = i+1
+ ident = ""
+ while j < n:
+ tokid = b.tokens[j].id
+ if tokid == '(': # a function declaration
+ state = 3
+ break
+ elif tokid == ';': # a variable declaration
+ state = 2
+ break
+ if tokid == tokIDENT:
+ ident = b.tokens[j].value
+ j += 1
+
+ if j >= n:
+ # This can only happen when the declaration
+ # does not end on the current block (e.g. with
+                        # a directive mixed inside it).
+ #
+ # We will treat it as malformed because
+ # it's very hard to recover from this case
+ # without making our parser much more
+ # complex.
+ #
+ #print "### skip unterminated static '%s'" % ident
+ break
+
+ if ident in knownStatics:
+ #print "### keep var/func '%s': %s" % (ident,repr(b.tokens[i:j]))
+ pass
+ else:
+ # We're going to skip the tokens for this declaration
+ #print "### skip variable /func'%s': %s" % (ident,repr(b.tokens[i:j]))
+ if i > first:
+ blocks2.append( Block(b.tokens[first:i]))
+ skipTokens = True
+ first = n
+
+ i = i+1
if i > first:
#print "### final '%s'" % repr(b.tokens[first:i])
diff --git a/libc/kernel/tools/defaults.py b/libc/kernel/tools/defaults.py
index 5e9d6770..2bee4ec 100644
--- a/libc/kernel/tools/defaults.py
+++ b/libc/kernel/tools/defaults.py
@@ -16,7 +16,11 @@
# path to the directory containing the original kernel headers
#
-kernel_original_path = os.path.normpath( find_program_dir() + '/../original' )
+kernel_original_path = os.path.normpath( find_program_dir() + '/../../../../external/kernel-headers/original' )
+
+# path to the default location of the cleaned-up headers
+#
+kernel_cleaned_path = os.path.normpath( find_program_dir() + '/..' )
# a special value that is used to indicate that a given macro is known to be
# undefined during optimization
@@ -112,6 +116,18 @@
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
+ *** To edit the content of this header, modify the corresponding
+ *** source file (e.g. under external/kernel-headers/original/) then
+ *** run bionic/libc/kernel/tools/update_all.py
+ ***
+ *** Any manual change here will be lost the next time this script will
+ *** be run. You've been warned!
+ ***
****************************************************************************
****************************************************************************/
"""
+
+# This is the warning line that will be inserted every N-th line in the output
+kernel_warning = """\
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+"""
diff --git a/libc/kernel/tools/find_headers.py b/libc/kernel/tools/find_headers.py
index 8e72bb6..3d622a8 100755
--- a/libc/kernel/tools/find_headers.py
+++ b/libc/kernel/tools/find_headers.py
@@ -3,7 +3,7 @@
# this program is used to find source code that includes linux kernel headers directly
# (e.g. with #include <linux/...> or #include <asm/...>)
#
-# then it lists
+# then it lists them on the standard output.
import sys, cpp, glob, os, re, getopt, kernel
from utils import *
@@ -12,20 +12,14 @@
program_dir = find_program_dir()
wanted_archs = kernel_archs
-wanted_include = os.path.normpath(program_dir + '/../original')
-wanted_config = os.path.normpath(program_dir + '/../original/config')
+wanted_config = None
def usage():
print """\
- usage: find_headers.py [options] (file|directory|@listfile)+
+ usage: find_headers.py [options] <kernel-root> (file|directory|@listfile)+
options:
- -d <include-dir> specify alternate kernel headers
- 'include' directory
- ('%s' by default)
-
- -c <file> specify alternate .config file
- ('%s' by default)
+ -c <file> specify .config file (none by default)
-a <archs> used to specify an alternative list
of architectures to support
@@ -37,12 +31,12 @@
by a set of source files or directories containing them. the search
is recursive to find *all* required files.
-""" % ( wanted_include, wanted_config, string.join(kernel_archs,",") )
+""" % ( string.join(kernel_archs,",") )
sys.exit(1)
try:
- optlist, args = getopt.getopt( sys.argv[1:], 'vc:d:a:' )
+ optlist, args = getopt.getopt( sys.argv[1:], 'vc:d:a:k:' )
except:
# unrecognized option
print "error: unrecognized option"
@@ -51,8 +45,6 @@
for opt, arg in optlist:
if opt == '-a':
wanted_archs = string.split(arg,',')
- elif opt == '-d':
- wanted_include = arg
elif opt == '-c':
wanted_config = arg
elif opt == '-v':
@@ -62,10 +54,10 @@
else:
usage()
-if len(args) < 1:
+if len(args) < 2:
usage()
-kernel_root = wanted_include
+kernel_root = args[0]
if not os.path.exists(kernel_root):
sys.stderr.write( "error: directory '%s' does not exist\n" % kernel_root )
sys.exit(1)
@@ -74,26 +66,26 @@
sys.stderr.write( "error: '%s' is not a directory\n" % kernel_root )
sys.exit(1)
-if not os.path.isdir(kernel_root+"/linux"):
- sys.stderr.write( "error: '%s' does not have a 'linux' directory\n" % kernel_root )
+if not os.path.isdir(kernel_root+"/include/linux"):
+ sys.stderr.write( "error: '%s' does not have an 'include/linux' directory\n" % kernel_root )
sys.exit(1)
-if not os.path.exists(wanted_config):
- sys.stderr.write( "error: file '%s' does not exist\n" % wanted_config )
- sys.exit(1)
+if wanted_config:
+ if not os.path.exists(wanted_config):
+ sys.stderr.write( "error: file '%s' does not exist\n" % wanted_config )
+ sys.exit(1)
-if not os.path.isfile(wanted_config):
- sys.stderr.write( "error: '%s' is not a file\n" % wanted_config )
- sys.exit(1)
+ if not os.path.isfile(wanted_config):
+ sys.stderr.write( "error: '%s' is not a file\n" % wanted_config )
+ sys.exit(1)
# find all architectures in the kernel tree
-re_asm_ = re.compile(r"asm-(\w+)")
archs = []
-for dir in os.listdir(kernel_root):
- m = re_asm_.match(dir)
- if m:
- if verbose: print ">> found kernel arch '%s'" % m.group(1)
- archs.append(m.group(1))
+for archdir in os.listdir(kernel_root+"/arch"):
+ if os.path.exists("%s/arch/%s/include/asm" % (kernel_root, archdir)):
+ if verbose:
+ print "Found arch '%s'" % archdir
+ archs.append(archdir)
# if we're using the 'kernel_headers' directory, there is only asm/
# and no other asm-<arch> directories (arm is assumed, which sucks)
@@ -126,6 +118,7 @@
# helper function used to walk the user files
def parse_file(path, parser):
+ #print "parse %s" % path
parser.parseFile(path)
@@ -136,7 +129,8 @@
# try to read the config file
try:
cparser = kernel.ConfigParser()
- cparser.parseFile( wanted_config )
+ if wanted_config:
+ cparser.parseFile( wanted_config )
except:
sys.stderr.write( "error: can't parse '%s'" % wanted_config )
sys.exit(1)
@@ -145,7 +139,8 @@
# first, obtain the list of kernel files used by our clients
fparser = kernel.HeaderScanner()
-walk_source_files( args, parse_file, fparser, excludes=["kernel_headers"] )
+dir_excludes=[".repo","external/kernel-headers","ndk","out","prebuilt","bionic/libc/kernel","development/ndk","external/qemu/distrib"]
+walk_source_files( args[1:], parse_file, fparser, excludes=["./"+f for f in dir_excludes] )
headers = fparser.getHeaders()
files = fparser.getFiles()
@@ -170,6 +165,6 @@
sys.exit(0)
for h in sorted(headers):
- print h
+ print "%s" % h
sys.exit(0)
diff --git a/libc/kernel/tools/kernel.py b/libc/kernel/tools/kernel.py
index 9d9b5f0..c203985 100644
--- a/libc/kernel/tools/kernel.py
+++ b/libc/kernel/tools/kernel.py
@@ -55,8 +55,11 @@
# <asm-generic/*>
# <mtd/*>
#
- re_combined =\
- re.compile(r"^.*<((%s)/[\d\w_\+\.\-/]*)>.*$" % string.join(kernel_dirs,"|") )
+ re_combined_str=\
+ r"^.*<((%s)/[\d\w_\+\.\-/]*)>.*$" % string.join(kernel_dirs,"|")
+
+ re_combined = re.compile(re_combined_str)
+
# some kernel files choose to include files with relative paths (x86 32/64
# dispatch for instance)
re_rel_dir = re.compile(r'^.*"([\d\w_\+\.\-/]+)".*$')
diff --git a/libc/kernel/tools/update_all.py b/libc/kernel/tools/update_all.py
index d25dc0e..6a730a5 100755
--- a/libc/kernel/tools/update_all.py
+++ b/libc/kernel/tools/update_all.py
@@ -6,7 +6,7 @@
def usage():
print """\
- usage: %(progname)s
+ usage: %(progname)s [kernel-original-path]
this program is used to update all the auto-generated clean headers
used by the Bionic C library. it assumes the following:
@@ -31,13 +31,19 @@
sys.stderr.write( "error: unrecognized option\n" )
usage()
-if len(optlist) > 0 or len(args) > 0:
+if len(optlist) > 0 or len(args) > 1:
usage()
progdir = find_program_dir()
-original_dir = os.path.normpath( progdir + "/../original" )
-if not os.path.isdir( original_dir ):
- panic( "required directory does not exists: %s\n" % original_dir )
+
+if len(args) == 1:
+    original_dir = args[0]
+ if not os.path.isdir(original_dir):
+ panic( "Not a directory: %s" % original_dir )
+else:
+ original_dir = kernel_original_path
+ if not os.path.isdir(original_dir):
+ panic( "Missing directory, please specify one through command-line: %s" % original_dir )
# find all source files in 'original'
#
@@ -57,29 +63,36 @@
#print "OLD " + repr(b.old_files)
+oldlen = 120
for path in sources:
- dst_path, newdata = clean_header.cleanupFile(path)
+ dst_path, newdata = clean_header.cleanupFile(path, original_dir)
if not dst_path:
continue
b.readFile( dst_path )
r = b.editFile( dst_path, newdata )
if r == 0:
- r = "unchanged"
+ state = "unchanged"
elif r == 1:
- r = "edited"
+ state = "edited"
else:
- r = "added"
+ state = "added"
- print "cleaning: %-*s -> %-*s (%s)" % ( 35, path, 35, dst_path, r )
+ str = "cleaning: %-*s -> %-*s (%s)" % ( 35, "<original>" + path[len(original_dir):], 35, dst_path, state )
+ if sys.stdout.isatty():
+ print "%-*s" % (oldlen,str),
+ if (r == 0):
+ print "\r",
+ else:
+ print "\n",
+ oldlen = 0
+ else:
+ print str
-# We don't use Perforce anymore, but just in case, define ANDROID_USE_P4
-# in your environment if you think you need it.
-usePerforce = os.environ.has_key("ANDROID_USE_P4")
+ oldlen = len(str)
-if usePerforce:
- b.updateP4Files()
-else:
- b.updateFiles()
+print "%-*s" % (oldlen,"Done!")
+
+b.updateGitFiles()
sys.exit(0)
diff --git a/libc/kernel/tools/utils.py b/libc/kernel/tools/utils.py
index 763c7d2..f4cf540 100644
--- a/libc/kernel/tools/utils.py
+++ b/libc/kernel/tools/utils.py
@@ -231,6 +231,15 @@
def walk_source_files(paths,callback,args,excludes=[]):
"""recursively walk a list of paths and files, only keeping the source files in directories"""
for path in paths:
+ if len(path) > 0 and path[0] == '@':
+ # this is the name of another file, include it and parse it
+ path = path[1:]
+ if os.path.exists(path):
+ for line in open(path):
+ if len(line) > 0 and line[-1] == '\n':
+ line = line[:-1]
+ walk_source_files([line],callback,args,excludes)
+ continue
if not os.path.isdir(path):
callback(path,args)
else:
@@ -238,7 +247,7 @@
#print "w-- %s (ex: %s)" % (repr((root,dirs)), repr(excludes))
if len(excludes):
for d in dirs[:]:
- if d in excludes:
+ if os.path.join(root,d) in excludes:
dirs.remove(d)
for f in files:
r, ext = os.path.splitext(f)
@@ -395,3 +404,19 @@
D2("P4 DELETES: %s" % files)
o = commands.getoutput( "p4 delete " + files )
D2( o )
+
+ def updateGitFiles(self):
+ adds, deletes, edits = self.getChanges()
+
+ if adds:
+ for dst in sorted(adds):
+ self._writeFile(dst)
+ commands.getoutput("git add " + " ".join(adds))
+
+ if deletes:
+ commands.getoutput("git rm " + " ".join(deletes))
+
+ if edits:
+ for dst in sorted(edits):
+ self._writeFile(dst)
+ commands.getoutput("git add " + " ".join(edits))