Python cleanups, round 1: whitespace

- Use 4 spaces instead of 2 for indentation. This is the Python
  standard (PEP 8) and is also what Google's Python style guide
  requires:
  https://google.github.io/styleguide/pyguide.html#Indentation
- Use 2 blank lines between top-level functions/classes

This does introduce a few line-too-long errors, which will be fixed
in a follow-up commit; the goal here is to keep this change as easy
to review as possible (git diff -w should be minimal).
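
For illustration, the target layout looks like this (a minimal sketch
with made-up function names, not code from this change):

    def first_example():
        return 1


    def second_example():
        return 2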

Change-Id: I463f18d11e72745107350ac0ae5588d1fb626ed6
Author: Chad Horohoe, 2018-05-16 22:33:06 -04:00 (committed by Paladox)
parent c1399cf4c7
commit dd22470db8
19 changed files with 1217 additions and 1152 deletions


@@ -229,54 +229,54 @@ opts.add_option('--no-searchbox', action="store_false", dest='searchbox',
options, _ = opts.parse_args()
try:
    try:
        out_file = open(options.out, 'w', errors='ignore')
        src_file = open(options.src, 'r', errors='ignore')
    except TypeError:
        out_file = open(options.out, 'w')
        src_file = open(options.src, 'r')
    last_line = ''
    ignore_next_line = False
    last_title = ''
    for line in src_file:
        if PAT_GERRIT.match(last_line):
            # Case of "GERRIT\n------" at the footer
            out_file.write(GERRIT_UPLINK)
            last_line = ''
        elif PAT_SEARCHBOX.match(last_line):
            # Case of 'SEARCHBOX\n---------'
            if options.searchbox:
                out_file.write(SEARCH_BOX)
            last_line = ''
        elif PAT_INCLUDE.match(line):
            # Case of 'include::<filename>'
            match = PAT_INCLUDE.match(line)
            out_file.write(last_line)
            last_line = match.group(1) + options.suffix + match.group(2) + '\n'
        elif PAT_STARS.match(line):
            if PAT_TITLE.match(last_line):
                # Case of the title in '.<title>\n****\nget::<url>\n****'
                match = PAT_TITLE.match(last_line)
                last_title = GET_TITLE % match.group(1)
            else:
                out_file.write(last_line)
                last_title = ''
        elif PAT_GET.match(line):
            # Case of '****\nget::<url>\n****' in rest api
            url = PAT_GET.match(line).group(1)
            out_file.write(GET_MACRO.format(url) % last_title)
            ignore_next_line = True
        elif ignore_next_line:
            # Handle the trailing '****' of the 'get::' case
            last_line = ''
            ignore_next_line = False
        else:
            out_file.write(last_line)
        last_line = line
    out_file.write(last_line)
    out_file.write(LINK_SCRIPT)
    out_file.close()
except IOError as err:
    sys.stderr.write(
        "error while expanding %s to %s: %s" % (options.src, options.out, err))
    exit(1)


@@ -17,6 +17,7 @@ FAILURE_MESSAGE = 'This commit message does not match the standard.' \
PASS_SCORE = '--code-review=0'
PASS_MESSAGE = ''


def main():
    change = None
    project = None
@@ -49,7 +50,7 @@ def main():
        sys.exit(-1)

    if change == None or project == None or branch == None \
            or commit == None or patchset == None:
        usage()
        sys.exit(-1)
@@ -58,7 +59,7 @@ def main():
    if status != 0:
        print('Error running \'%s\'. status: %s, output:\n\n%s' % \
              (command, status, output))
        sys.exit(-1)

    commitMessage = output[(output.find('\n\n')+2):]
@@ -66,7 +67,7 @@ def main():
    if len(commitLines) > 1 and len(commitLines[1]) != 0:
        fail(commit, 'Invalid commit summary. The summary must be ' \
             + 'one line followed by a blank line.')

    i = 0
    for line in commitLines:
@@ -76,23 +77,27 @@ def main():
    passes(commit)


def usage():
    print('Usage:\n')
    print(sys.argv[0] + ' --change <change id> --project <project name> ' \
          + '--branch <branch> --commit <sha1> --patchset <patchset id>')


def fail(commit, message):
    command = SSH_COMMAND + FAILURE_SCORE + ' -m \\\"' \
        + _shell_escape(FAILURE_MESSAGE + '\n\n' + message) \
        + '\\\" ' + commit
    subprocess.getstatusoutput(command)
    sys.exit(1)


def passes(commit):
    command = SSH_COMMAND + PASS_SCORE + ' -m \\\"' \
        + _shell_escape(PASS_MESSAGE) + ' \\\" ' + commit
    subprocess.getstatusoutput(command)


def _shell_escape(x):
    s = ''
    for c in x:
@@ -102,6 +107,6 @@ def _shell_escape(x):
            s = s + c
    return s


if __name__ == '__main__':
    main()


@@ -47,228 +47,229 @@ HEADERS = {"Content-Type": "application/json", "charset": "UTF-8"}
# Random names from US Census Data
FIRST_NAMES = [
    "Casey", "Yesenia", "Shirley", "Tara", "Wanda", "Sheryl", "Jaime", "Elaine",
    "Charlotte", "Carly", "Bonnie", "Kirsten", "Kathryn", "Carla", "Katrina",
    "Melody", "Suzanne", "Sandy", "Joann", "Kristie", "Sally", "Emma", "Susan",
    "Amanda", "Alyssa", "Patty", "Angie", "Dominique", "Cynthia", "Jennifer",
    "Theresa", "Desiree", "Kaylee", "Maureen", "Jeanne", "Kellie", "Valerie",
    "Nina", "Judy", "Diamond", "Anita", "Rebekah", "Stefanie", "Kendra", "Erin",
    "Tammie", "Tracey", "Bridget", "Krystal", "Jasmin", "Sonia", "Meghan",
    "Rebecca", "Jeanette", "Meredith", "Beverly", "Natasha", "Chloe", "Selena",
    "Teresa", "Sheena", "Cassandra", "Rhonda", "Tami", "Jodi", "Shelly", "Angela",
    "Kimberly", "Terry", "Joanna", "Isabella", "Lindsey", "Loretta", "Dana",
    "Veronica", "Carolyn", "Laura", "Karen", "Dawn", "Alejandra", "Cassie",
    "Lorraine", "Yolanda", "Kerry", "Stephanie", "Caitlin", "Melanie", "Kerri",
    "Doris", "Sandra", "Beth", "Carol", "Vicki", "Shelia", "Bethany", "Rachael",
    "Donna", "Alexandra", "Barbara", "Ana", "Jillian", "Ann", "Rachel", "Lauren",
    "Hayley", "Misty", "Brianna", "Tanya", "Danielle", "Courtney", "Jacqueline",
    "Becky", "Christy", "Alisha", "Phyllis", "Faith", "Jocelyn", "Nancy",
    "Gloria", "Kristen", "Evelyn", "Julie", "Julia", "Kara", "Chelsey", "Cassidy",
    "Jean", "Chelsea", "Jenny", "Diana", "Haley", "Kristine", "Kristina", "Erika",
    "Jenna", "Alison", "Deanna", "Abigail", "Melissa", "Sierra", "Linda",
    "Monica", "Tasha", "Traci", "Yvonne", "Tracy", "Marie", "Maria", "Michaela",
    "Stacie", "April", "Morgan", "Cathy", "Darlene", "Cristina", "Emily",
    "Ian", "Russell", "Phillip", "Jay", "Barry", "Brad", "Frederick", "Fernando",
    "Timothy", "Ricardo", "Bernard", "Daniel", "Ruben", "Alexis", "Kyle", "Malik",
    "Norman", "Kent", "Melvin", "Stephen", "Daryl", "Kurt", "Greg", "Alex",
    "Mario", "Riley", "Marvin", "Dan", "Steven", "Roberto", "Lucas", "Leroy",
    "Preston", "Drew", "Fred", "Casey", "Wesley", "Elijah", "Reginald", "Joel",
    "Christopher", "Jacob", "Luis", "Philip", "Mark", "Rickey", "Todd", "Scott",
    "Terrence", "Jim", "Stanley", "Bobby", "Thomas", "Gabriel", "Tracy", "Marcus",
    "Peter", "Michael", "Calvin", "Herbert", "Darryl", "Billy", "Ross", "Dustin",
    "Jaime", "Adam", "Henry", "Xavier", "Dominic", "Lonnie", "Danny", "Victor",
    "Glen", "Perry", "Jackson", "Grant", "Gerald", "Garrett", "Alejandro",
    "Eddie", "Alan", "Ronnie", "Mathew", "Dave", "Wayne", "Joe", "Craig",
    "Terry", "Chris", "Randall", "Parker", "Francis", "Keith", "Neil", "Caleb",
    "Jon", "Earl", "Taylor", "Bryce", "Brady", "Max", "Sergio", "Leon", "Gene",
    "Darin", "Bill", "Edgar", "Antonio", "Dalton", "Arthur", "Austin", "Cristian",
    "Kevin", "Omar", "Kelly", "Aaron", "Ethan", "Tom", "Isaac", "Maurice",
    "Gilbert", "Hunter", "Willie", "Harry", "Dale", "Darius", "Jerome", "Jason",
    "Harold", "Kerry", "Clarence", "Gregg", "Shane", "Eduardo", "Micheal",
    "Howard", "Vernon", "Rodney", "Anthony", "Levi", "Larry", "Franklin", "Jimmy",
    "Jonathon", "Carl",
]
LAST_NAMES = [
    "Savage", "Hendrix", "Moon", "Larsen", "Rocha", "Burgess", "Bailey", "Farley",
    "Moses", "Schmidt", "Brown", "Hoover", "Klein", "Jennings", "Braun", "Rangel",
    "Casey", "Dougherty", "Hancock", "Wolf", "Henry", "Thomas", "Bentley",
    "Barnett", "Kline", "Pitts", "Rojas", "Sosa", "Paul", "Hess", "Chase",
    "Mckay", "Bender", "Colins", "Montoya", "Townsend", "Potts", "Ayala", "Avery",
    "Sherman", "Tapia", "Hamilton", "Ferguson", "Huang", "Hooper", "Zamora",
    "Logan", "Lloyd", "Quinn", "Monroe", "Brock", "Ibarra", "Fowler", "Weiss",
    "Montgomery", "Diaz", "Dixon", "Olson", "Robertson", "Arias", "Benjamin",
    "Abbott", "Stein", "Schroeder", "Beck", "Velasquez", "Barber", "Nichols",
    "Ortiz", "Burns", "Moody", "Stokes", "Wilcox", "Rush", "Michael", "Kidd",
    "Rowland", "Mclean", "Saunders", "Chung", "Newton", "Potter", "Hickman",
    "Ray", "Larson", "Figueroa", "Duncan", "Sparks", "Rose", "Hodge", "Huynh",
    "Joseph", "Morales", "Beasley", "Mora", "Fry", "Ross", "Novak", "Hahn",
    "Wise", "Knight", "Frederick", "Heath", "Pollard", "Vega", "Mcclain",
    "Buckley", "Conrad", "Cantrell", "Bond", "Mejia", "Wang", "Lewis", "Johns",
    "Mcknight", "Callahan", "Reynolds", "Norris", "Burnett", "Carey", "Jacobson",
    "Oneill", "Oconnor", "Leonard", "Mckenzie", "Hale", "Delgado", "Spence",
    "Brandt", "Obrien", "Bowman", "James", "Avila", "Roberts", "Barker", "Cohen",
    "Bradley", "Prince", "Warren", "Summers", "Little", "Caldwell", "Garrett",
    "Hughes", "Norton", "Burke", "Holden", "Merritt", "Lee", "Frank", "Wiley",
    "Ho", "Weber", "Keith", "Winters", "Gray", "Watts", "Brady", "Aguilar",
    "Nicholson", "David", "Pace", "Cervantes", "Davis", "Baxter", "Sanchez",
    "Singleton", "Taylor", "Strickland", "Glenn", "Valentine", "Roy", "Cameron",
    "Beard", "Norman", "Fritz", "Anthony", "Koch", "Parrish", "Herman", "Hines",
    "Sutton", "Gallegos", "Stephenson", "Lozano", "Franklin", "Howe", "Bauer",
    "Love", "Ali", "Ellison", "Lester", "Guzman", "Jarvis", "Espinoza",
    "Fletcher", "Burton", "Woodard", "Peterson", "Barajas", "Richard", "Bryan",
    "Goodman", "Cline", "Rowe", "Faulkner", "Crawford", "Mueller", "Patterson",
    "Hull", "Walton", "Wu", "Flores", "York", "Dickson", "Barnes", "Fisher",
    "Strong", "Juarez", "Fitzgerald", "Schmitt", "Blevins", "Villa", "Sullivan",
    "Velazquez", "Horton", "Meadows", "Riley", "Barrera", "Neal", "Mendez",
    "Mcdonald", "Floyd", "Lynch", "Mcdowell", "Benson", "Hebert", "Livingston",
    "Davies", "Richardson", "Vincent", "Davenport", "Osborn", "Mckee", "Marshall",
    "Ferrell", "Martinez", "Melton", "Mercer", "Yoder", "Jacobs", "Mcdaniel",
    "Mcmillan", "Peters", "Atkinson", "Wood", "Briggs", "Valencia", "Chandler",
    "Rios", "Hunter", "Bean", "Hicks", "Hays", "Lucero", "Malone", "Waller",
    "Banks", "Myers", "Mitchell", "Grimes", "Houston", "Hampton", "Trujillo",
    "Perkins", "Moran", "Welch", "Contreras", "Montes", "Ayers", "Hayden",
    "Daniel", "Weeks", "Porter", "Gill", "Mullen", "Nolan", "Dorsey", "Crane",
    "Estes", "Lam", "Wells", "Cisneros", "Giles", "Watson", "Vang", "Scott",
    "Knox", "Hanna", "Fields",
]
def clean(json_string):
    # Strip JSON XSS Tag
    json_string = json_string.strip()
    if json_string.startswith(")]}'"):
        return json_string[5:]
    return json_string
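
For context, Gerrit's REST API prepends the magic prefix )]}' to JSON
responses to defeat cross-site script inclusion, which is what clean()
strips. A minimal usage sketch with a made-up payload:

    raw = ")]}'\n{\"ok\": true}"
    print(clean(raw))             # {"ok": true}
    print(clean('{"ok": true}'))  # unprefixed input is returned unchanged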
def basic_auth(user):
    return requests.auth.HTTPBasicAuth(user["username"], user["http_password"])
def fetch_admin_group():
    global GROUP_ADMIN
    # Get admin group
    r = json.loads(clean(requests.get(BASE_URL + "groups/" + "?suggest=ad&p=All-Projects",
                                      headers=HEADERS,
                                      auth=ADMIN_BASIC_AUTH).text))
    admin_group_name = r.keys()[0]
    GROUP_ADMIN = r[admin_group_name]
    GROUP_ADMIN["name"] = admin_group_name
def generate_random_text():
    return " ".join([random.choice("lorem ipsum "
                                   "doleret delendam "
                                   "\n esse".split(" ")) for _ in range(1, 100)])
def set_up():
    global TMP_PATH
    TMP_PATH = tempfile.mkdtemp()
    atexit.register(clean_up)
    os.makedirs(TMP_PATH + "/ssh")
    os.makedirs(TMP_PATH + "/repos")
    fetch_admin_group()
def get_random_users(num_users):
    users = random.sample([(f, l) for f in FIRST_NAMES for l in LAST_NAMES],
                          num_users)
    names = []
    for u in users:
        names.append({"firstname": u[0],
                      "lastname": u[1],
                      "name": u[0] + " " + u[1],
                      "username": u[0] + u[1],
                      "email": u[0] + "." + u[1] + "@gerritcodereview.com",
                      "http_password": "secret",
                      "groups": []})
    return names
def generate_ssh_keys(gerrit_users):
    for user in gerrit_users:
        key_file = TMP_PATH + "/ssh/" + user["username"] + ".key"
        subprocess.check_output(["ssh-keygen", "-f", key_file, "-N", ""])
        with open(key_file + ".pub", "r") as f:
            user["ssh_key"] = f.read()
def create_gerrit_groups():
    groups = [
        {"name": "iOS-Maintainers", "description": "iOS Maintainers",
         "visible_to_all": True, "owner": GROUP_ADMIN["name"],
         "owner_id": GROUP_ADMIN["id"]},
        {"name": "Android-Maintainers", "description": "Android Maintainers",
         "visible_to_all": True, "owner": GROUP_ADMIN["name"],
         "owner_id": GROUP_ADMIN["id"]},
        {"name": "Backend-Maintainers", "description": "Backend Maintainers",
         "visible_to_all": True, "owner": GROUP_ADMIN["name"],
         "owner_id": GROUP_ADMIN["id"]},
        {"name": "Script-Maintainers", "description": "Script Maintainers",
         "visible_to_all": True, "owner": GROUP_ADMIN["name"],
         "owner_id": GROUP_ADMIN["id"]},
        {"name": "Security-Team", "description": "Sec Team",
         "visible_to_all": False, "owner": GROUP_ADMIN["name"],
         "owner_id": GROUP_ADMIN["id"]}]
    for g in groups:
        requests.put(BASE_URL + "groups/" + g["name"],
                     json.dumps(g),
                     headers=HEADERS,
                     auth=ADMIN_BASIC_AUTH)
    return [g["name"] for g in groups]
def create_gerrit_projects(owner_groups):
    projects = [
        {"id": "android", "name": "Android", "parent": "All-Projects",
         "branches": ["master"], "description": "Our android app.",
         "owners": [owner_groups[0]], "create_empty_commit": True},
        {"id": "ios", "name": "iOS", "parent": "All-Projects",
         "branches": ["master"], "description": "Our ios app.",
         "owners": [owner_groups[1]], "create_empty_commit": True},
        {"id": "backend", "name": "Backend", "parent": "All-Projects",
         "branches": ["master"], "description": "Our awesome backend.",
         "owners": [owner_groups[2]], "create_empty_commit": True},
        {"id": "scripts", "name": "Scripts", "parent": "All-Projects",
         "branches": ["master"], "description": "some small scripts.",
         "owners": [owner_groups[3]], "create_empty_commit": True}]
    for p in projects:
        requests.put(BASE_URL + "projects/" + p["name"],
                     json.dumps(p),
                     headers=HEADERS,
                     auth=ADMIN_BASIC_AUTH)
    return [p["name"] for p in projects]
def create_gerrit_users(gerrit_users):
    for user in gerrit_users:
        requests.put(BASE_URL + "accounts/" + user["username"],
                     json.dumps(user),
                     headers=HEADERS,
                     auth=ADMIN_BASIC_AUTH)
def create_change(user, project_name):
    random_commit_message = generate_random_text()
    change = {
        "project": project_name,
        "subject": random_commit_message.split("\n")[0],
        "branch": "master",
        "status": "NEW",
    }
    requests.post(BASE_URL + "changes/",
                  json.dumps(change),
                  headers=HEADERS,
                  auth=basic_auth(user))
def clean_up():
    shutil.rmtree(TMP_PATH)
def main():
    p = optparse.OptionParser()
    p.add_option("-u", "--user_count", action="store",
                 default=100,
                 type='int',
                 help="number of users to generate")
    p.add_option("-p", "--port", action="store",
                 default=8080,
                 type='int',
                 help="port of server")
    (options, _) = p.parse_args()
    global BASE_URL
    BASE_URL = BASE_URL % options.port
    print(BASE_URL)
    set_up()
    gerrit_users = get_random_users(options.user_count)
    group_names = create_gerrit_groups()
    for idx, u in enumerate(gerrit_users):
        u["groups"].append(group_names[idx % len(group_names)])
        if idx % 5 == 0:
            # Also add to security group
            u["groups"].append(group_names[4])
    generate_ssh_keys(gerrit_users)
    create_gerrit_users(gerrit_users)
    project_names = create_gerrit_projects(group_names)
    for idx, u in enumerate(gerrit_users):
        for _ in xrange(random.randint(1, 5)):
            create_change(u, project_names[4 * idx / len(gerrit_users)])


main()


@@ -10,103 +10,109 @@ fnCompiledRegex = re.compile(removeSelfInvokeRegex, re.DOTALL)
regexBehavior = r"<script>(.+)<\/script>"
behaviorCompiledRegex = re.compile(regexBehavior, re.DOTALL)
def _open(filename, mode="r"):
    try:
        return open(filename, mode, encoding="utf-8")
    except TypeError:
        return open(filename, mode)
def replaceBehaviorLikeHTML(fileIn, fileOut):
    with _open(fileIn) as f:
        file_str = f.read()
        match = behaviorCompiledRegex.search(file_str)
        if (match):
            with _open("polygerrit-ui/temp/behaviors/" + fileOut.replace("html", "js"), "w+") as f:
                f.write(match.group(1))


def replaceBehaviorLikeJS(fileIn, fileOut):
    with _open(fileIn) as f:
        file_str = f.read()
        with _open("polygerrit-ui/temp/behaviors/" + fileOut, "w+") as f:
            f.write(file_str)
def generateStubBehavior(behaviorName):
    with _open("polygerrit-ui/temp/behaviors/" + behaviorName + ".js", "w+") as f:
        f.write("/** @polymerBehavior **/\n" + behaviorName + "= {};")
def replacePolymerElement(fileIn, fileOut, root):
    with _open(fileIn) as f:
        key = fileOut.split('.')[0]
        # Removed self invoked function
        file_str = f.read()
        file_str_no_fn = fnCompiledRegex.search(file_str)

        if file_str_no_fn:
            package = root.replace("/", ".") + "." + fileOut

            with _open("polygerrit-ui/temp/" + fileOut, "w+") as f:
                mainFileContents = re.sub(polymerCompiledRegex, "exports = Polymer({", file_str_no_fn.group(1)).replace("'use strict';", "")
                f.write("/** \n" \
                        "* @fileoverview \n" \
                        "* @suppress {missingProperties} \n" \
                        "*/ \n\n" \
                        "goog.module('polygerrit." + package + "')\n\n" + mainFileContents)

            # Add package and javascript to files object.
            elements[key]["js"] = "polygerrit-ui/temp/" + fileOut
            elements[key]["package"] = package
def writeTempFile(file, root):
    # This is included in an extern because it is directly on the window object
    # (for now at least).
    if "gr-reporting" in file:
        return
    key = file.split('.')[0]
    if not key in elements:
        # gr-app doesn't have an additional level
        elements[key] = {"directory": 'gr-app' if len(root.split("/")) < 4 else root.split("/")[3]}
    if file.endswith(".html") and not file.endswith("_test.html"):
        # gr-navigation is treated like a behavior rather than a standard element
        # because of the way it is added to the Gerrit object.
        if file.endswith("gr-navigation.html"):
            replaceBehaviorLikeHTML(os.path.join(root, file), file)
        else:
            elements[key]["html"] = os.path.join(root, file)
    if file.endswith(".js"):
        replacePolymerElement(os.path.join(root, file), file, root)
if __name__ == "__main__":
    # Create temp directory.
    if not os.path.exists("polygerrit-ui/temp"):
        os.makedirs("polygerrit-ui/temp")

    # Within temp directory create behavior directory.
    if not os.path.exists("polygerrit-ui/temp/behaviors"):
        os.makedirs("polygerrit-ui/temp/behaviors")

    elements = {}

    # Go through every file in app/elements, rewrite it into the temp
    # directory, and add it to the elements object, which is used to generate
    # a map of html files, package names, and javascript files.
    for root, dirs, files in os.walk("polygerrit-ui/app/elements"):
        for file in files:
            writeTempFile(file, root)

    # Special case for polymer behaviors we are using.
    replaceBehaviorLikeHTML("polygerrit-ui/app/bower_components/iron-a11y-keys-behavior/iron-a11y-keys-behavior.html", "iron-a11y-keys-behavior.html")
    generateStubBehavior("Polymer.IronOverlayBehavior")
    generateStubBehavior("Polymer.IronFitBehavior")

    # TODO: figure out something to do with iron-overlay-behavior;
    # it is hard-coded and reformatted.
    with _open("polygerrit-ui/temp/map.json", "w+") as f:
        f.write(json.dumps(elements))

    for root, dirs, files in os.walk("polygerrit-ui/app/behaviors"):
        for file in files:
            if file.endswith("behavior.html"):
                replaceBehaviorLikeHTML(os.path.join(root, file), file)
            elif file.endswith("behavior.js"):
                replaceBehaviorLikeJS(os.path.join(root, file), file)
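
For orientation, a hedged sketch of one entry in the elements map that
this writes to polygerrit-ui/temp/map.json (the element name gr-foo and
its paths are invented for illustration):

    {
        "gr-foo": {
            "directory": "shared",
            "html": "polygerrit-ui/app/elements/shared/gr-foo/gr-foo.html",
            "js": "polygerrit-ui/temp/gr-foo.js",
            "package": "polygerrit-ui.app.elements.shared.gr-foo.gr-foo.js",
        },
    }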


@@ -19,14 +19,16 @@
from __future__ import print_function
import sys
def print_help():
    for (n, v) in vars(sys.modules['__main__']).items():
        if not n.startswith("__") and not n in ['help', 'reload'] \
           and str(type(v)) != "<type 'javapackage'>" \
           and not str(v).startswith("<module"):
            print("\"%s\" is \"%s\"" % (n, v))
    print()
    print("Welcome to the Gerrit Inspector")
    print("Enter help() to see the above again, EOF to quit and stop Gerrit")
print_help()


@@ -25,33 +25,33 @@ graph = defaultdict(list)
handled_rules = []
for xml in args.xmls:
    tree = ET.parse(xml)
    root = tree.getroot()

    for child in root:
        rule_name = child.attrib["name"]
        if rule_name in handled_rules:
            # already handled in other xml files
            continue

        handled_rules.append(rule_name)
        for c in child.getchildren():
            if c.tag != "rule-input":
                continue

            license_name = c.attrib["name"]
            if LICENSE_PREFIX in license_name:
                entries[rule_name].append(license_name)
                graph[license_name].append(rule_name)
if len(graph[DO_NOT_DISTRIBUTE]):
    print("DO_NOT_DISTRIBUTE license found in:", file=stderr)
    for target in graph[DO_NOT_DISTRIBUTE]:
        print(target, file=stderr)
    exit(1)
if args.asciidoctor:
    print(
        # We don't want any blank line before "= Gerrit Code Review - Licenses"
        """= Gerrit Code Review - Licenses
@@ -93,39 +93,39 @@ updates of mirror servers, or realtime backups.
""")
for n in sorted(graph.keys()):
    if len(graph[n]) == 0:
        continue

    name = n[len(LICENSE_PREFIX):]
    safename = name.replace(".", "_")
    print()
    print("[[%s]]" % safename)
    print(name)
    print()

    for d in sorted(graph[n]):
        if d.startswith("//lib:") or d.startswith("//lib/"):
            p = d[len("//lib:"):]
        else:
            p = d[d.index(":")+1:].lower()
        if "__" in p:
            p = p[:p.index("__")]
        print("* " + p)

    print()
    print("[[%s_license]]" % safename)
    print("----")
    filename = n[2:].replace(":", "/")
    try:
        with open(filename, errors='ignore') as fd:
            copyfileobj(fd, stdout)
    except TypeError:
        with open(filename) as fd:
            copyfileobj(fd, stdout)
    print()
    print("----")
    print()
if args.asciidoctor:
    print(
"""
GERRIT
------


@@ -30,49 +30,50 @@ LOCAL_PROPERTIES = 'local.properties'
def safe_mkdirs(d):
    if path.isdir(d):
        return
    try:
        makedirs(d)
    except OSError as err:
        if not path.isdir(d):
            raise err
def download_properties(root_dir):
    """ Get the download properties.

    First tries to find the properties file in the given root directory,
    and if not found there, tries in the Gerrit settings folder in the
    user's home directory.

    Returns a set of download properties, which may be empty.
    """
    p = {}
    local_prop = path.join(root_dir, LOCAL_PROPERTIES)
    if not path.isfile(local_prop):
        local_prop = path.join(GERRIT_HOME, LOCAL_PROPERTIES)
    if path.isfile(local_prop):
        try:
            with open(local_prop) as fd:
                for line in fd:
                    if line.startswith('download.'):
                        d = [e.strip() for e in line.split('=', 1)]
                        name, url = d[0], d[1]
                        p[name[len('download.'):]] = url
        except OSError:
            pass
    return p
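
A minimal usage sketch (the mirror URL is made up): given a
local.properties file containing the line

    download.GERRIT = http://mirror.example.com/gerrit

download_properties() would return
{'GERRIT': 'http://mirror.example.com/gerrit'}.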
def cache_entry(args):
    if args.v:
        h = args.v
    else:
        h = sha1(args.u.encode('utf-8')).hexdigest()
    name = '%s-%s' % (path.basename(args.o), h)
    return path.join(CACHE_DIR, name)
opts = OptionParser()
opts.add_option('-o', help='local output file')
@@ -85,89 +86,89 @@ args, _ = opts.parse_args()
root_dir = args.o
while root_dir and path.dirname(root_dir) != root_dir:
    root_dir, n = path.split(root_dir)
    if n == 'WORKSPACE':
        break
redirects = download_properties(root_dir)
cache_ent = cache_entry(args)
src_url = resolve_url(args.u, redirects)
if not path.exists(cache_ent):
    try:
        safe_mkdirs(path.dirname(cache_ent))
    except OSError as err:
        print('error creating directory %s: %s' %
              (path.dirname(cache_ent), err), file=stderr)
        exit(1)

    print('Download %s' % src_url, file=stderr)
    try:
        check_call(['curl', '--proxy-anyauth', '-ksSfLo', cache_ent, src_url])
    except OSError as err:
        print('could not invoke curl: %s\nis curl installed?' % err, file=stderr)
        exit(1)
    except CalledProcessError as err:
        print('error using curl: %s' % err, file=stderr)
        exit(1)
if args.v:
    have = hash_file(sha1(), cache_ent).hexdigest()
    if args.v != have:
        print((
            '%s:\n' +
            'expected %s\n' +
            'received %s\n') % (src_url, args.v, have), file=stderr)
        try:
            remove(cache_ent)
        except OSError as err:
            if path.exists(cache_ent):
                print('error removing %s: %s' % (cache_ent, err), file=stderr)
        exit(1)
exclude = []
if args.x:
    exclude += args.x
if args.exclude_java_sources:
    try:
        with ZipFile(cache_ent, 'r') as zf:
            for n in zf.namelist():
                if n.endswith('.java'):
                    exclude.append(n)
    except (BadZipfile, LargeZipFile) as err:
        print('error opening %s: %s' % (cache_ent, err), file=stderr)
        exit(1)
if args.unsign:
    try:
        with ZipFile(cache_ent, 'r') as zf:
            for n in zf.namelist():
                if (n.endswith('.RSA')
                        or n.endswith('.SF')
                        or n.endswith('.LIST')):
                    exclude.append(n)
    except (BadZipfile, LargeZipFile) as err:
        print('error opening %s: %s' % (cache_ent, err), file=stderr)
        exit(1)
safe_mkdirs(path.dirname(args.o))
if exclude:
    try:
        shutil.copyfile(cache_ent, args.o)
    except (shutil.Error, IOError) as err:
        print('error copying to %s: %s' % (args.o, err), file=stderr)
        exit(1)
    try:
        check_call(['zip', '-d', args.o] + exclude)
    except CalledProcessError as err:
        print('error removing files from zip: %s' % err, file=stderr)
        exit(1)
else:
    try:
        link(cache_ent, args.o)
    except OSError as err:
        try:
            shutil.copyfile(cache_ent, args.o)
        except (shutil.Error, IOError) as err:
            print('error copying to %s: %s' % (args.o, err), file=stderr)
            exit(1)


@@ -30,20 +30,20 @@ MAIN = '//tools/eclipse:classpath'
GWT = '//gerrit-gwtui:ui_module'
AUTO = '//lib/auto:auto-value'
JRE = '/'.join([
    'org.eclipse.jdt.launching.JRE_CONTAINER',
    'org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType',
    'JavaSE-1.8',
])
# Map of targets to corresponding classpath collector rules
cp_targets = {
    AUTO: '//tools/eclipse:autovalue_classpath_collect',
    GWT: '//tools/eclipse:gwt_classpath_collect',
    MAIN: '//tools/eclipse:main_classpath_collect',
}
ROOT = path.abspath(__file__)
while not path.exists(path.join(ROOT, 'WORKSPACE')):
    ROOT = path.dirname(ROOT)
opts = OptionParser()
opts.add_option('--plugins', help='create eclipse projects for plugins',
@@ -56,38 +56,43 @@ args, _ = opts.parse_args()
batch_option = '--batch' if args.batch else None
def _build_bazel_cmd(*args):
    cmd = ['bazel']
    if batch_option:
        cmd.append('--batch')
    for arg in args:
        cmd.append(arg)
    return cmd
def retrieve_ext_location():
    return check_output(_build_bazel_cmd('info', 'output_base')).strip()
def gen_bazel_path():
    bazel = check_output(['which', 'bazel']).strip().decode('UTF-8')
    with open(path.join(ROOT, ".bazel_path"), 'w') as fd:
        fd.write("bazel=%s\n" % bazel)
        fd.write("PATH=%s\n" % environ["PATH"])
def _query_classpath(target):
    deps = []
    t = cp_targets[target]
    try:
        check_call(_build_bazel_cmd('build', t))
    except CalledProcessError:
        exit(1)
    name = 'bazel-bin/tools/eclipse/' + t.split(':')[1] + '.runtime_classpath'
    deps = [line.rstrip('\n') for line in open(name)]
    return deps
def gen_project(name='gerrit', root=ROOT):
    p = path.join(root, '.project')
    with open(p, 'w') as fd:
        print("""\
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>%(name)s</name>
@@ -102,16 +107,17 @@ def gen_project(name='gerrit', root=ROOT):
</projectDescription>\
""" % {"name": name}, file=fd)
def gen_plugin_classpath(root):
    p = path.join(root, '.classpath')
    with open(p, 'w') as fd:
        if path.exists(path.join(root, 'src', 'test', 'java')):
            testpath = """
<classpathentry excluding="**/BUILD" kind="src" path="src/test/java"\
out="eclipse-out/test"/>"""
        else:
            testpath = ""
        print("""\
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry excluding="**/BUILD" kind="src" path="src/main/java"/>%(testpath)s
@@ -120,186 +126,189 @@ def gen_plugin_classpath(root):
<classpathentry kind="output" path="eclipse-out/classes"/>
</classpath>""" % {"testpath": testpath}, file=fd)
def gen_classpath(ext):
    def make_classpath():
        impl = minidom.getDOMImplementation()
        return impl.createDocument(None, 'classpath', None)

    def classpathentry(kind, path, src=None, out=None, exported=None):
        e = doc.createElement('classpathentry')
        e.setAttribute('kind', kind)
        # TODO(davido): Remove this and other exclude BUILD files hack
        # when this Bazel bug is fixed:
        # https://github.com/bazelbuild/bazel/issues/1083
        if kind == 'src':
            e.setAttribute('excluding', '**/BUILD')
        e.setAttribute('path', path)
        if src:
            e.setAttribute('sourcepath', src)
        if out:
            e.setAttribute('output', out)
        if exported:
            e.setAttribute('exported', 'true')
        doc.documentElement.appendChild(e)

    doc = make_classpath()
    src = set()
    lib = set()
    proto = set()
    gwt_src = set()
    gwt_lib = set()
    plugins = set()
    # Classpath entries are absolute for cross-cell support
    java_library = re.compile('bazel-out/.*?-fastbuild/bin/(.*)/[^/]+[.]jar$')
    srcs = re.compile('(.*/external/[^/]+)/jar/(.*)[.]jar')
    for p in _query_classpath(MAIN):
        if p.endswith('-src.jar'):
            # gwt_module() depends on -src.jar for Java to JavaScript compiles.
            if p.startswith("external"):
                p = path.join(ext, p)
            gwt_lib.add(p)
            continue

        m = java_library.match(p)
        if m:
            src.add(m.group(1))
            # Exceptions: both source and lib
            if p.endswith('libquery_parser.jar') or \
               p.endswith('libgerrit-prolog-common.jar'):
                lib.add(p)
            # JGit dependency from external repository
            if 'gerrit-' not in p and 'jgit' in p:
                lib.add(p)
            # Assume any jars in /proto/ are from java_proto_library rules
            if '/bin/proto/' in p:
                proto.add(p)
        else:
            # Don't mess up with Bazel internal test runner dependencies.
            # When we use Eclipse we rely on it for running the tests
            if p.endswith("external/bazel_tools/tools/jdk/TestRunner_deploy.jar"):
                continue
            if p.startswith("external"):
                p = path.join(ext, p)
            lib.add(p)

    for p in _query_classpath(GWT):
        m = java_library.match(p)
        if m:
            gwt_src.add(m.group(1))

    classpathentry('src', 'java')
    classpathentry('src', 'javatests', out='eclipse-out/test')
    classpathentry('src', 'resources')
    for s in sorted(src):
        out = None

        if s.startswith('lib/'):
            out = 'eclipse-out/lib'
        elif s.startswith('plugins/'):
            if args.plugins:
                plugins.add(s)
                continue
            out = 'eclipse-out/' + s

        p = path.join(s, 'java')
        if path.exists(p):
            classpathentry('src', p, out=out)
            continue

        for env in ['main', 'test']:
            o = None
            if out:
                o = out + '/' + env
            elif env == 'test':
                o = 'eclipse-out/test'
            for srctype in ['java', 'resources']:
                p = path.join(s, 'src', env, srctype)
                if path.exists(p):
                    classpathentry('src', p, out=o)

    for libs in [lib, gwt_lib]:
        for j in sorted(libs):
            s = None
            m = srcs.match(j)
            if m:
                prefix = m.group(1)
                suffix = m.group(2)
                p = path.join(prefix, "jar", "%s-src.jar" % suffix)
                if path.exists(p):
                    s = p
            if args.plugins:
                classpathentry('lib', j, s, exported=True)
            else:
                # Filter out the source JARs that we pull through transitive closure of
                # GWT plugin API (we add source directories themself). Exception is
                # libEdit-src.jar, that is needed for GWT SDM to work.
                m = java_library.match(j)
                if m:
                    if m.group(1).startswith("gerrit-") and \
                       j.endswith("-src.jar") and \
                       not j.endswith("libEdit-src.jar"):
                        continue
                classpathentry('lib', j, s)

    for p in sorted(proto):
        s = p.replace('-fastbuild/bin/proto/lib', '-fastbuild/genfiles/proto/')
        s = s.replace('.jar', '-src.jar')
        classpathentry('lib', p, s)

    for s in sorted(gwt_src):
        p = path.join(ROOT, s, 'src', 'main', 'java')
        if path.exists(p):
            classpathentry('lib', p, out='eclipse-out/gwtsrc')

    classpathentry('con', JRE)
    classpathentry('output', 'eclipse-out/classes')
    p = path.join(ROOT, '.classpath')
    with open(p, 'w') as fd:
        doc.writexml(fd, addindent='\t', newl='\n', encoding='UTF-8')

    if args.plugins:
        for plugin in plugins:
            plugindir = path.join(ROOT, plugin)
            try:
                gen_project(plugin.replace('plugins/', ""), plugindir)
                gen_plugin_classpath(plugindir)
            except (IOError, OSError) as err:
                print('error generating project for %s: %s' % (plugin, err),
                      file=sys.stderr)
def gen_factorypath(ext):
    doc = minidom.getDOMImplementation().createDocument(None, 'factorypath', None)
    for jar in _query_classpath(AUTO):
        e = doc.createElement('factorypathentry')
        e.setAttribute('kind', 'EXTJAR')
        e.setAttribute('id', path.join(ext, jar))
        e.setAttribute('enabled', 'true')
        e.setAttribute('runInBatchMode', 'false')
        doc.documentElement.appendChild(e)

    p = path.join(ROOT, '.factorypath')
    with open(p, 'w') as fd:
        doc.writexml(fd, addindent='\t', newl='\n', encoding='UTF-8')
try:
    ext_location = retrieve_ext_location().decode("utf-8")
    gen_project(args.project_name)
    gen_classpath(ext_location)
    gen_factorypath(ext_location)
    gen_bazel_path()

    # TODO(davido): Remove this when GWT gone
    gwt_working_dir = ".gwt_work_dir"
    if not path.isdir(gwt_working_dir):
        makedirs(path.join(ROOT, gwt_working_dir))

    try:
        check_call(_build_bazel_cmd('build', MAIN, GWT, '//java/org/eclipse/jgit:libEdit-src.jar'))
    except CalledProcessError:
        exit(1)
except KeyboardInterrupt:
    print('Interrupted by user', file=sys.stderr)
    exit(1)


@@ -33,137 +33,139 @@ import bowerutil
# list of licenses for packages that don't specify one in their bower.json file.
package_licenses = {
    "codemirror-minified": "codemirror-minified",
    "es6-promise": "es6-promise",
    "fetch": "fetch",
    "font-roboto": "polymer",
    "iron-a11y-announcer": "polymer",
    "iron-a11y-keys-behavior": "polymer",
    "iron-autogrow-textarea": "polymer",
    "iron-behaviors": "polymer",
    "iron-dropdown": "polymer",
    "iron-fit-behavior": "polymer",
    "iron-flex-layout": "polymer",
    "iron-form-element-behavior": "polymer",
    "iron-icon": "polymer",
    "iron-iconset-svg": "polymer",
    "iron-input": "polymer",
    "iron-menu-behavior": "polymer",
    "iron-meta": "polymer",
    "iron-overlay-behavior": "polymer",
    "iron-resizable-behavior": "polymer",
    "iron-selector": "polymer",
    "iron-validatable-behavior": "polymer",
    "moment": "moment",
    "neon-animation": "polymer",
    "page": "page.js",
    "paper-button": "polymer",
    "paper-icon-button": "polymer",
    "paper-input": "polymer",
    "paper-item": "polymer",
    "paper-listbox": "polymer",
    "paper-toggle-button": "polymer",
    "paper-styles": "polymer",
    "paper-tabs": "polymer",
    "polymer": "polymer",
    "polymer-resin": "polymer",
    "promise-polyfill": "promise-polyfill",
    "web-animations-js": "Apache2.0",
    "webcomponentsjs": "polymer",
    "paper-material": "polymer",
    "paper-styles": "polymer",
    "paper-behaviors": "polymer",
    "paper-ripple": "polymer",
    "iron-checked-element-behavior": "polymer",
    "font-roboto": "polymer",
}
def build_bower_json(version_targets, seeds):
"""Generate bower JSON file, return its path.
"""Generate bower JSON file, return its path.
Args:
version_targets: bazel target names of the versions.json file.
seeds: an iterable of bower package names of the seed packages, ie.
the packages whose versions we control manually.
"""
bower_json = collections.OrderedDict()
bower_json['name'] = 'bower2bazel-output'
bower_json['version'] = '0.0.0'
bower_json['description'] = 'Auto-generated bower.json for dependency management'
bower_json['private'] = True
bower_json['dependencies'] = {}
Args:
version_targets: bazel target names of the versions.json file.
seeds: an iterable of bower package names of the seed packages, ie.
the packages whose versions we control manually.
"""
bower_json = collections.OrderedDict()
bower_json['name'] = 'bower2bazel-output'
bower_json['version'] = '0.0.0'
bower_json['description'] = 'Auto-generated bower.json for dependency management'
bower_json['private'] = True
bower_json['dependencies'] = {}

    seeds = set(seeds)
    for v in version_targets:
        path = os.path.join("bazel-out/*-fastbuild/bin", v.lstrip("/").replace(":", "/"))
        fs = glob.glob(path)
        assert len(fs) == 1, '%s: file not found or multiple files found: %s' % (path, fs)
        with open(fs[0]) as f:
            j = json.load(f)
            if "" in j:
                # drop dummy entries.
                del j[""]

            trimmed = {}
            for k, v in j.items():
                if k in seeds:
                    trimmed[k] = v

            bower_json['dependencies'].update(trimmed)

    tmpdir = tempfile.mkdtemp()
    ret = os.path.join(tmpdir, 'bower.json')
    with open(ret, 'w') as f:
        json.dump(bower_json, f, indent=2)
    return ret
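
A quick usage sketch for the helper above; the bazel target name here is hypothetical, whereas main() further down derives the real targets from a bazel query:

# Hypothetical usage sketch; main() below queries bazel for the real targets.
bower_json_path = build_bower_json(
    ["//polygerrit-ui:polygerrit_components-versions.json"],
    seeds=["moment", "page"])
# bower_json_path now points at a bower.json (in a temp dir) that pins
# only the seed packages.
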
def decode(input):
    try:
        return input.decode("utf-8")
    except TypeError:
        return input


def bower_command(args):
    base = subprocess.check_output(["bazel", "info", "output_base"]).strip()
    exp = os.path.join(decode(base), "external", "bower", "*npm_binary.tgz")
    fs = sorted(glob.glob(exp))
    assert len(fs) == 1, "bower tarball not found, or multiple versions found: %s" % fs
    return ["python", os.getcwd() + "/tools/js/run_npm_binary.py", sorted(fs)[0]] + args


def main(args):
    opts = optparse.OptionParser()
    opts.add_option('-w', help='.bzl output for WORKSPACE')
    opts.add_option('-b', help='.bzl output for //lib:BUILD')
    opts, args = opts.parse_args()

    target_str = subprocess.check_output([
        "bazel", "query", "kind(bower_component_bundle, //polygerrit-ui/...)"])
    seed_str = subprocess.check_output([
        "bazel", "query", "attr(seed, 1, kind(bower_component, deps(//polygerrit-ui/...)))"])
    targets = [s for s in decode(target_str).split('\n') if s]
    seeds = [s for s in decode(seed_str).split('\n') if s]
    prefix = "//lib/js:"
    non_seeds = [s for s in seeds if not s.startswith(prefix)]
    assert not non_seeds, non_seeds
    seeds = set([s[len(prefix):] for s in seeds])

    version_targets = [t + "-versions.json" for t in targets]
    subprocess.check_call(['bazel', 'build'] + version_targets)
    bower_json_path = build_bower_json(version_targets, seeds)
    dir = os.path.dirname(bower_json_path)
    cmd = bower_command(["install"])

    build_out = sys.stdout
    if opts.b:
        build_out = open(opts.b + ".tmp", 'w')

    ws_out = sys.stdout
    if opts.w:
        ws_out = open(opts.w + ".tmp", 'w')

    header = """# DO NOT EDIT
# generated with the following command:
#
# %s
@ -171,30 +173,30 @@ def main(args):
""" % ' '.join(sys.argv)
    ws_out.write(header)
    build_out.write(header)

    oldwd = os.getcwd()
    os.chdir(dir)
    subprocess.check_call(cmd)

    interpret_bower_json(seeds, ws_out, build_out)
    ws_out.close()
    build_out.close()

    os.chdir(oldwd)
    os.rename(opts.w + ".tmp", opts.w)
    os.rename(opts.b + ".tmp", opts.b)


def dump_workspace(data, seeds, out):
    out.write('load("//tools/bzl:js.bzl", "bower_archive")\n\n')
    out.write('def load_bower_archives():\n')
    for d in data:
        if d["name"] in seeds:
            continue
        out.write("""  bower_archive(
name = "%(name)s",
package = "%(normalized-name)s",
version = "%(version)s",
@ -203,48 +205,48 @@ def dump_workspace(data, seeds, out):
def dump_build(data, seeds, out):
    out.write('load("//tools/bzl:js.bzl", "bower_component")\n\n')
    out.write('def define_bower_components():\n')
    for d in data:
        out.write("  bower_component(\n")
        out.write("    name = \"%s\",\n" % d["name"])
        out.write("    license = \"//lib:LICENSE-%s\",\n" % d["bazel-license"])
        deps = sorted(d.get("dependencies", {}).keys())
        if deps:
            if len(deps) == 1:
                out.write("    deps = [ \":%s\" ],\n" % deps[0])
            else:
                out.write("    deps = [\n")
                for dep in deps:
                    out.write("      \":%s\",\n" % dep)
                out.write("    ],\n")
        if d["name"] in seeds:
            out.write("    seed = True,\n")
        out.write("  )\n")
    # done
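
As an illustration (not part of this change), feeding dump_build() one fake package record shows the shape of the generated Starlark; the record fields mirror what interpret_bower_json() attaches below:

import sys

fake_pkg = {"name": "moment", "bazel-license": "moment", "dependencies": {}}
dump_build([fake_pkg], seeds=set(), out=sys.stdout)
# After the load()/def header, this emits roughly:
#   bower_component(
#     name = "moment",
#     license = "//lib:LICENSE-moment",
#   )
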
def interpret_bower_json(seeds, ws_out, build_out):
    out = subprocess.check_output(["find", "bower_components/", "-name", ".bower.json"])

    data = []
    for f in sorted(decode(out).split('\n')):
        if not f:
            continue
        pkg = json.load(open(f))
        pkg_name = pkg["name"]
pkg["bazel-sha1"] = bowerutil.hash_bower_component(
hashlib.sha1(), os.path.dirname(f)).hexdigest()
license = package_licenses.get(pkg_name, "DO_NOT_DISTRIBUTE")
pkg["bazel-sha1"] = bowerutil.hash_bower_component(
hashlib.sha1(), os.path.dirname(f)).hexdigest()
license = package_licenses.get(pkg_name, "DO_NOT_DISTRIBUTE")
pkg["bazel-license"] = license
pkg["normalized-name"] = pkg["_originalSource"]
data.append(pkg)
pkg["bazel-license"] = license
pkg["normalized-name"] = pkg["_originalSource"]
data.append(pkg)

    dump_workspace(data, seeds, ws_out)
    dump_build(data, seeds, build_out)


if __name__ == '__main__':
    main(sys.argv[1:])


@ -16,31 +16,31 @@ import os
def hash_bower_component(hash_obj, path):
"""Hash the contents of a bower component directory.
"""Hash the contents of a bower component directory.
This is a stable hash of a directory downloaded with `bower install`, minus
the .bower.json file, which is autogenerated each time by bower. Used in lieu
of hashing a zipfile of the contents, since zipfiles are difficult to hash in
a stable manner.
This is a stable hash of a directory downloaded with `bower install`, minus
the .bower.json file, which is autogenerated each time by bower. Used in lieu
of hashing a zipfile of the contents, since zipfiles are difficult to hash in
a stable manner.
Args:
hash_obj: an open hash object, e.g. hashlib.sha1().
path: path to the directory to hash.
Args:
hash_obj: an open hash object, e.g. hashlib.sha1().
path: path to the directory to hash.
Returns:
The passed-in hash_obj.
"""
if not os.path.isdir(path):
raise ValueError('Not a directory: %s' % path)
Returns:
The passed-in hash_obj.
"""
if not os.path.isdir(path):
raise ValueError('Not a directory: %s' % path)

    path = os.path.abspath(path)
    for root, dirs, files in os.walk(path):
        dirs.sort()
        for f in sorted(files):
            if f == '.bower.json':
                continue
            p = os.path.join(root, f)
            hash_obj.update(p[len(path)+1:].encode("utf-8"))
            hash_obj.update(open(p, "rb").read())

    return hash_obj
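
A minimal sketch of how this helper is driven (the component directory name is hypothetical); bower2bazel.py above uses it the same way to fill in bazel-sha1:

import hashlib

import bowerutil

digest = bowerutil.hash_bower_component(
    hashlib.sha1(), 'bower_components/moment').hexdigest()
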


@ -30,99 +30,99 @@ CACHE_DIR = os.path.expanduser(os.path.join(
def bower_cmd(bower, *args):
    cmd = bower.split(' ')
    cmd.extend(args)
    return cmd


def bower_info(bower, name, package, version):
    cmd = bower_cmd(bower, '-l=error', '-j',
                    'info', '%s#%s' % (package, version))
    try:
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except Exception:
        sys.stderr.write("error executing: %s\n" % ' '.join(cmd))
        raise
    out, err = p.communicate()
    if p.returncode:
        sys.stderr.write(err)
        raise OSError('Command failed: %s' % ' '.join(cmd))

    try:
        info = json.loads(out)
    except ValueError:
        raise ValueError('invalid JSON from %s:\n%s' % (" ".join(cmd), out))
    info_name = info.get('name')
    if info_name != name:
        raise ValueError('expected package name %s, got: %s' % (name, info_name))
    return info


def ignore_deps(info):
    # Tell bower to ignore dependencies so we just download this component.
    # This is just an optimization, since we only pick out the component we
    # need, but it's important when downloading sizable dependency trees.
    #
    # As of 1.6.5 I don't think ignoredDependencies can be specified on the
    # command line with --config, so we have to create .bowerrc.
    deps = info.get('dependencies')
    if deps:
        with open(os.path.join('.bowerrc'), 'w') as f:
            json.dump({'ignoredDependencies': list(deps.keys())}, f)
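
For illustration: if the component's bower.json declared dependencies on iron-behaviors and iron-flex-layout (hypothetical names), the .bowerrc written here would contain something like:

{"ignoredDependencies": ["iron-behaviors", "iron-flex-layout"]}
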
def cache_entry(name, package, version, sha1):
    if not sha1:
        # hashlib requires bytes; encode for Python 3 compatibility.
        sha1 = hashlib.sha1(
            ('%s#%s' % (package, version)).encode('utf-8')).hexdigest()
    return os.path.join(CACHE_DIR, '%s-%s.zip-%s' % (name, version, sha1))


def main(args):
    opts = optparse.OptionParser()
    opts.add_option('-n', help='short name of component')
    opts.add_option('-b', help='bower command')
    opts.add_option('-p', help='full package name of component')
    opts.add_option('-v', help='version number')
    opts.add_option('-s', help='expected content sha1')
    opts.add_option('-o', help='output file location')
    opts, args_ = opts.parse_args(args)

    assert opts.p
    assert opts.v
    assert opts.n

    cwd = os.getcwd()
    outzip = os.path.join(cwd, opts.o)
    cached = cache_entry(opts.n, opts.p, opts.v, opts.s)

    if not os.path.exists(cached):
        info = bower_info(opts.b, opts.n, opts.p, opts.v)
        ignore_deps(info)
        subprocess.check_call(
            bower_cmd(opts.b, '--quiet', 'install', '%s#%s' % (opts.p, opts.v)))

        bc = os.path.join(cwd, 'bower_components')
        subprocess.check_call(
            ['zip', '-q', '--exclude', '.bower.json', '-r', cached, opts.n],
            cwd=bc)

        if opts.s:
            path = os.path.join(bc, opts.n)
            sha1 = bowerutil.hash_bower_component(hashlib.sha1(), path).hexdigest()
            if opts.s != sha1:
                print((
                    '%s#%s:\n'
                    'expected %s\n'
                    'received %s\n') % (opts.p, opts.v, opts.s, sha1), file=sys.stderr)
                try:
                    os.remove(cached)
                except OSError as err:
                    if os.path.exists(cached):
                        print('error removing %s: %s' % (cached, err), file=sys.stderr)
                return 1

    shutil.copyfile(cached, outzip)
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))


@ -32,49 +32,49 @@ import tempfile
def is_bundled(tar):
    # No entries for directories, so scan for a matching prefix.
    for entry in tar.getmembers():
        if entry.name.startswith('package/node_modules/'):
            return True
    return False


def bundle_dependencies():
    with open('package.json') as f:
        package = json.load(f)
    package['bundledDependencies'] = list(package['dependencies'].keys())
    with open('package.json', 'w') as f:
        json.dump(package, f)
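
As a sketch of the rewrite this performs (hypothetical contents):

# Before: {"dependencies": {"x": "1.0.0"}}
# After:  {"dependencies": {"x": "1.0.0"}, "bundledDependencies": ["x"]}

Listing the dependencies under bundledDependencies is what makes `npm pack` below include node_modules in the resulting tarball.
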
def main(args):
    if len(args) != 2:
        print('Usage: %s <package> <version>' % sys.argv[0], file=sys.stderr)
        return 1

    name, version = args
    filename = '%s-%s.tgz' % (name, version)
    url = 'http://registry.npmjs.org/%s/-/%s' % (name, filename)

    tmpdir = tempfile.mkdtemp()
    tgz = os.path.join(tmpdir, filename)
    atexit.register(lambda: shutil.rmtree(tmpdir))

    subprocess.check_call(['curl', '--proxy-anyauth', '-ksfo', tgz, url])
    with tarfile.open(tgz, 'r:gz') as tar:
        if is_bundled(tar):
            print('%s already has bundled node_modules' % filename)
            return 1
        tar.extractall(path=tmpdir)

    oldpwd = os.getcwd()
    os.chdir(os.path.join(tmpdir, 'package'))
    bundle_dependencies()
    subprocess.check_call(['npm', 'install'])
    subprocess.check_call(['npm', 'pack'])
    shutil.copy(filename, os.path.join(oldpwd, filename))
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))


@ -27,65 +27,67 @@ import tempfile
def extract(path, outdir, bin):
    if os.path.exists(os.path.join(outdir, bin)):
        return  # Another process finished extracting, ignore.

    # Use a temp directory adjacent to outdir so shutil.move can use the same
    # device atomically.
    tmpdir = tempfile.mkdtemp(dir=os.path.dirname(outdir))

    def cleanup():
        try:
            shutil.rmtree(tmpdir)
        except OSError:
            pass  # Too late now
    atexit.register(cleanup)

    def extract_one(mem):
        dest = os.path.join(outdir, mem.name)
        tar.extract(mem, path=tmpdir)
        try:
            os.makedirs(os.path.dirname(dest))
        except OSError:
            pass  # Either exists, or will fail on the next line.
        shutil.move(os.path.join(tmpdir, mem.name), dest)

    with tarfile.open(path, 'r:gz') as tar:
        for mem in tar.getmembers():
            if mem.name != bin:
                extract_one(mem)
        # Extract bin last so other processes only short circuit when
        # extraction is finished.
        extract_one(tar.getmember(bin))


def main(args):
    path = args[0]
    suffix = '.npm_binary.tgz'
    tgz = os.path.basename(path)

    parts = tgz[:-len(suffix)].split('@')

    if not tgz.endswith(suffix) or len(parts) != 2:
        print('usage: %s <path/to/npm_binary>' % sys.argv[0], file=sys.stderr)
        return 1

    name, _ = parts

    # Avoid importing from gerrit because we don't want to depend on the right CWD.
    sha1 = hashlib.sha1(open(path, 'rb').read()).hexdigest()
    outdir = '%s-%s' % (path[:-len(suffix)], sha1)
    rel_bin = os.path.join('package', 'bin', name)
    bin = os.path.join(outdir, rel_bin)
    if not os.path.isfile(bin):
        extract(path, outdir, rel_bin)

    nodejs = spawn.find_executable('nodejs')
    if nodejs:
        # Debian installs Node.js as 'nodejs', due to a conflict with another
        # package.
        subprocess.check_call([nodejs, bin] + args[1:])
    else:
        subprocess.check_call([bin] + args[1:])


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
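
A hypothetical invocation, matching how bower_command() in bower2bazel.py drives this wrapper (the tarball name is illustrative):

# python tools/js/run_npm_binary.py external/bower/bower@1.8.0.npm_binary.tgz install
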


@ -29,56 +29,56 @@ opts.add_option('-s', action='append', help='triplet of artifactId:type:path')
args, ctx = opts.parse_args()
if not args.v:
    print('version is empty', file=stderr)
    exit(1)
root = path.abspath(__file__)
while not path.exists(path.join(root, 'WORKSPACE')):
    root = path.dirname(root)
if 'install' == args.a:
    cmd = [
        'mvn',
        'install:install-file',
        '-Dversion=%s' % args.v,
    ]
elif 'deploy' == args.a:
    cmd = [
        'mvn',
        'gpg:sign-and-deploy-file',
        '-DrepositoryId=%s' % args.repository,
        '-Durl=%s' % args.url,
    ]
else:
    print("unknown action -a %s" % args.a, file=stderr)
    exit(1)
for spec in args.s:
    artifact, packaging_type, src = spec.split(':')
    exe = cmd + [
        '-DpomFile=%s' % path.join(root, 'tools', 'maven', '%s_pom.xml' % artifact),
        '-Dpackaging=%s' % packaging_type,
        '-Dfile=%s' % src,
    ]
    try:
        if environ.get('VERBOSE'):
            print(' '.join(exe), file=stderr)
        check_output(exe)
    except Exception as e:
        print('%s command failed: %s\n%s' % (args.a, ' '.join(exe), e),
              file=stderr)
        if environ.get('VERBOSE') and isinstance(e, CalledProcessError):
            print('Command output\n%s' % e.output, file=stderr)
        exit(1)
out = stderr
if args.o:
    out = open(args.o, 'w')

with out as fd:
    if args.repository:
        print('Repository: %s' % args.repository, file=fd)
    if args.url:
        print('URL: %s' % args.url, file=fd)
    print('Version: %s' % args.v, file=fd)
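
A hypothetical end-to-end invocation of this script (version, flag spellings, and paths are illustrative; -s follows the artifactId:type:path triplet format from the option help above):

# python tools/maven/mvn.py -a install -v 2.15.1 \
#     -s gerrit-war:war:bazel-bin/gerrit.war -o install.log
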


@ -20,8 +20,8 @@ import zipfile
import io
if len(sys.argv) < 3:
    print('usage: %s <out.zip> <in.zip>...' % sys.argv[0], file=sys.stderr)
    exit(1)
outfile = sys.argv[1]
infiles = sys.argv[2:]
@ -29,22 +29,22 @@ seen = set()
SERVICES = 'META-INF/services/'
try:
    with zipfile.ZipFile(outfile, 'w') as outzip:
        services = collections.defaultdict(lambda: '')
        for infile in infiles:
            with zipfile.ZipFile(infile) as inzip:
                for info in inzip.infolist():
                    n = info.filename
                    if n in seen:
                        continue
                    elif n.startswith(SERVICES):
                        # Concatenate all provider configuration files.
                        services[n] += inzip.read(n).decode("UTF-8")
                        continue
                    outzip.writestr(info, inzip.read(n))
                    seen.add(n)

        for n, v in list(services.items()):
            outzip.writestr(n, v)
except Exception as err:
    exit('Failed to merge jars: %s' % err)
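
For illustration, with hypothetical inputs: if two input jars both ship META-INF/services/com.example.Spi, their provider lists are concatenated into a single entry in the output, while any other duplicated path keeps only its first occurrence:

# in1.jar: META-INF/services/com.example.Spi -> "com.example.A\n"
# in2.jar: META-INF/services/com.example.Spi -> "com.example.B\n"
# out.zip: META-INF/services/com.example.Spi -> "com.example.A\ncom.example.B\n"
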


@ -101,9 +101,9 @@ def _main():
summary = summary + "."
data = {
"version": Version(options.version),
"previous": options.previous,
"summary": summary
"version": Version(options.version),
"previous": options.previous,
"summary": summary
}
war = os.path.join(


@ -15,57 +15,57 @@
from os import path
REPO_ROOTS = {
    'GERRIT': 'http://gerrit-maven.storage.googleapis.com',
    'GERRIT_API': 'https://gerrit-api.commondatastorage.googleapis.com/release',
    'MAVEN_CENTRAL': 'http://repo1.maven.org/maven2',
    'MAVEN_LOCAL': 'file://' + path.expanduser('~/.m2/repository'),
    'MAVEN_SNAPSHOT': 'https://oss.sonatype.org/content/repositories/snapshots',
}
def resolve_url(url, redirects):
""" Resolve URL of a Maven artifact.
""" Resolve URL of a Maven artifact.
prefix:path is passed as URL. prefix identifies known or custom
repositories that can be rewritten in redirects set, passed as
second arguments.
prefix:path is passed as URL. prefix identifies known or custom
repositories that can be rewritten in redirects set, passed as
second arguments.
A special case is supported, when prefix neither exists in
REPO_ROOTS, no in redirects set: the url is returned as is.
This enables plugins to pass custom maven_repository URL as is
directly to maven_jar().
A special case is supported, when prefix neither exists in
REPO_ROOTS, no in redirects set: the url is returned as is.
This enables plugins to pass custom maven_repository URL as is
directly to maven_jar().
Returns a resolved path for Maven artifact.
"""
s = url.find(':')
if s < 0:
return url
scheme, rest = url[:s], url[s+1:]
if scheme in redirects:
root = redirects[scheme]
elif scheme in REPO_ROOTS:
root = REPO_ROOTS[scheme]
else:
return url
root = root.rstrip('/')
rest = rest.lstrip('/')
return '/'.join([root, rest])
Returns a resolved path for Maven artifact.
"""
s = url.find(':')
if s < 0:
return url
scheme, rest = url[:s], url[s+1:]
if scheme in redirects:
root = redirects[scheme]
elif scheme in REPO_ROOTS:
root = REPO_ROOTS[scheme]
else:
return url
root = root.rstrip('/')
rest = rest.lstrip('/')
return '/'.join([root, rest])
def hash_file(hash_obj, path):
"""Hash the contents of a file.
"""Hash the contents of a file.
Args:
hash_obj: an open hash object, e.g. hashlib.sha1().
path: path to the file to hash.
Args:
hash_obj: an open hash object, e.g. hashlib.sha1().
path: path to the file to hash.
Returns:
The passed-in hash_obj.
"""
with open(path, 'rb') as f:
while True:
b = f.read(8192)
if not b:
break
hash_obj.update(b)
return hash_obj
Returns:
The passed-in hash_obj.
"""
with open(path, 'rb') as f:
while True:
b = f.read(8192)
if not b:
break
hash_obj.update(b)
return hash_obj
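
A minimal usage sketch (the file name is hypothetical); util_test.py below exercises resolve_url() in the same standalone way:

import hashlib

from util import hash_file

print(hash_file(hashlib.sha1(), 'gerrit.war').hexdigest())
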


@ -16,28 +16,30 @@
import unittest
from util import resolve_url
class TestResolveUrl(unittest.TestCase):
""" run to test:
python -m unittest -v util_test
"""
""" run to test:
python -m unittest -v util_test
"""
    def testKnown(self):
        url = resolve_url('GERRIT:foo.jar', {})
        self.assertEqual(url, 'http://gerrit-maven.storage.googleapis.com/foo.jar')

    def testKnownRedirect(self):
        url = resolve_url('MAVEN_CENTRAL:foo.jar',
                          {'MAVEN_CENTRAL': 'http://my.company.mirror/maven2'})
        self.assertEqual(url, 'http://my.company.mirror/maven2/foo.jar')

    def testCustom(self):
        url = resolve_url('http://maven.example.com/release/foo.jar', {})
        self.assertEqual(url, 'http://maven.example.com/release/foo.jar')

    def testCustomRedirect(self):
        url = resolve_url('MAVEN_EXAMPLE:foo.jar',
                          {'MAVEN_EXAMPLE': 'http://maven.example.com/release'})
        self.assertEqual(url, 'http://maven.example.com/release/foo.jar')


if __name__ == '__main__':
    unittest.main()


@ -23,24 +23,24 @@ parser = OptionParser()
opts, args = parser.parse_args()
if not len(args):
    parser.error('not enough arguments')
elif len(args) > 1:
    parser.error('too many arguments')
DEST_PATTERN = r'\g<1>%s\g<3>' % args[0]
def replace_in_file(filename, src_pattern):
    try:
        f = open(filename, "r")
        s = f.read()
        f.close()
        s = re.sub(src_pattern, DEST_PATTERN, s)
        f = open(filename, "w")
        f.write(s)
        f.close()
    except IOError as err:
        print('error updating %s: %s' % (filename, err), file=sys.stderr)
src_pattern = re.compile(r'^(\s*<version>)([-.\w]+)(</version>\s*)$',
@ -48,8 +48,8 @@ src_pattern = re.compile(r'^(\s*<version>)([-.\w]+)(</version>\s*)$',
for project in ['gerrit-acceptance-framework', 'gerrit-extension-api',
'gerrit-plugin-api', 'gerrit-plugin-gwtui',
'gerrit-war']:
    pom = os.path.join('tools', 'maven', '%s_pom.xml' % project)
    replace_in_file(pom, src_pattern)
src_pattern = re.compile(r'^(GERRIT_VERSION = ")([-.\w]+)(")$', re.MULTILINE)
replace_in_file('version.bzl', src_pattern)
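
For illustration, with a hypothetical new version of 2.15.1 the substitution above rewrites a pom version line like this:

import re

# Hypothetical demo of the DEST_PATTERN substitution performed above.
pattern = re.compile(r'^(\s*<version>)([-.\w]+)(</version>\s*)$', re.MULTILINE)
print(re.sub(pattern, r'\g<1>2.15.1\g<3>', '  <version>2.15</version>'))
# -> '  <version>2.15.1</version>'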