add metadata to the uploaded test runner binary (#4092)

* move s3 api helpers into own TS file

* add demo file

* add comments

* add method to set user metadata by copying it

* set checksum metadata on uploaded binary

* move checksum to the right upload file

* call the right method

* fix require from ts

* convert size to string

* needs extension

* test binary against other projects

* set checksums as headers during first upload

* Revert "set checksums as headers during first upload"

This reverts commit 2043d9ee1f.

* set ACL to public-read when setting metadata on binary

* linting

* pass content-type

* update scripts tests

* linting

* add --platformArch parameter
This commit is contained in:
Gleb Bahmutov
2019-05-02 12:29:37 -04:00
committed by GitHub
parent e12edb8309
commit 6f685ab8b8
9 changed files with 273 additions and 97 deletions

View File

@@ -1,11 +1,11 @@
import { s3helpers } from './s3-api'
const debug = require("debug")("cypress:binary")
import la from 'lazy-ass'
import is from 'check-more-types'
// using "arg" module for parsing CLI arguments
// because it plays really nicely with TypeScript
import arg from 'arg'
import S3 from 'aws-sdk/clients/s3'
import {prop, sortBy, last} from 'ramda'
import {prop, sortBy, last, equals} from 'ramda'
import pluralize from 'pluralize'
// inquirer-confirm is missing type definition
@@ -100,88 +100,6 @@ export const prompts = {
}
}
/**
* Utility object with methods that deal with S3.
* Useful for testing our code that calls S3 methods.
*/
export const s3helpers = {
makeS3 (aws) {
la(is.unemptyString(aws.key), 'missing aws key')
la(is.unemptyString(aws.secret), 'missing aws secret')
return new S3({
accessKeyId: aws.key,
secretAccessKey: aws.secret
})
},
verifyZipFileExists (zipFile: string, bucket: string, s3: S3): Promise<null> {
debug('checking S3 file %s', zipFile)
debug('bucket %s', bucket)
return new Promise((resolve, reject) => {
s3.headObject({
Bucket: bucket,
Key: zipFile
}, (err, data) => {
if (err) {
debug('error getting object %s', zipFile)
debug(err)
return reject(err)
}
debug('s3 data for %s', zipFile)
debug(data)
resolve()
})
})
},
/**
* Returns list of prefixes in a given folder
*/
listS3Objects (uploadDir: string, bucket: string, s3: S3): Promise<string[]> {
la(is.unemptyString(uploadDir), 'invalid upload dir', uploadDir)
return new Promise((resolve, reject) => {
const prefix = uploadDir + '/'
s3.listObjectsV2({
Bucket: bucket,
Prefix: prefix,
Delimiter: '/'
}, (err, result) => {
if (err) {
return reject(err)
}
debug('AWS result in %s %s', bucket, prefix)
debug('%o', result)
resolve(result.CommonPrefixes.map(prop('Prefix')))
})
})
},
async copyS3 (sourceKey: string, destinationKey: string, bucket: string, s3: S3) {
return new Promise((resole, reject) => {
debug('copying %s in bucket %s to %s', sourceKey, bucket, destinationKey)
s3.copyObject({
Bucket: bucket,
CopySource: bucket + '/' + sourceKey,
Key: destinationKey
}, (err, data) => {
if (err) {
return reject(err)
}
debug('result of copying')
debug('%o', data)
})
})
}
}
/**
* Moves binaries built for different platforms into a single
* folder on S3 before officially releasing as a new version.
@@ -191,6 +109,8 @@ export const moveBinaries = async (args = []) => {
const options = arg({
'--commit': String,
'--version': String,
// optional, if passed, only the binary for that platform will be moved
'--platformArch': String,
// aliases
'--sha': '--commit',
'-v': '--version'
@@ -200,7 +120,7 @@ export const moveBinaries = async (args = []) => {
debug('moveBinaries with options %o', options)
// @ts-ignore
la(is.commitId(options['--commit']), 'missing commit SHA', options)
la(is.commitId(options['--commit']), 'missing or invalid commit SHA', options)
// @ts-ignore
la(is.semver(options['--version']), 'missing version to collect', options)
@@ -215,7 +135,14 @@ export const moveBinaries = async (args = []) => {
// found s3 paths with last build for same commit for all platforms
const lastBuilds: Desktop[] = []
const platforms: platformArch[] = uploadUtils.getValidPlatformArchs()
let platforms: platformArch[] = uploadUtils.getValidPlatformArchs()
if (options['--platformArch']) {
const onlyPlatform = options['--platformArch']
console.log('only moving single platform %s', onlyPlatform)
la(uploadUtils.isValidPlatformArch(onlyPlatform), 'invalid platform-arch', onlyPlatform)
platforms = platforms.filter(equals(onlyPlatform))
}
la(platforms.length, 'no platforms to move', platforms)
for (const platformArch of platforms) {
la(uploadUtils.isValidPlatformArch(platformArch),
@@ -277,7 +204,8 @@ export const moveBinaries = async (args = []) => {
const destinationPath = getFullUploadName(options)
console.log('copying test runner %s to %s', lastBuild.platformArch, destinationPath)
await s3helpers.copyS3(lastBuild.s3zipPath, destinationPath, aws.bucket, s3)
await s3helpers.copyS3(lastBuild.s3zipPath, destinationPath, aws.bucket,
'application/zip', 'public-read', s3)
testRunners.push({
platformArch: lastBuild.platformArch,

View File

@@ -0,0 +1,21 @@
// ignore TS errors - we are importing from CoffeeScript files
// @ts-ignore
import uploadUtils from './util/upload'
import { s3helpers } from './s3-api'

const aws = uploadUtils.getS3Credentials()
const s3 = s3helpers.makeS3(aws)

const bucket = aws.bucket
const key = 'beta/binary/3.3.0/darwin-x64/circle-develop-455046b928c861d4457b2ec5426a51de1fda74fd-102212/cypress.zip'

/*
a little demo showing how user metadata can be set and read on a S3 object.
*/
// NOTE: setUserMetadata's signature is (bucket, key, metadata, contentType, acl, s3);
// previously this demo passed the s3 client in the contentType slot and
// omitted the acl and s3 arguments entirely.
s3helpers.setUserMetadata(bucket, key, {
  user: 'bar'
}, 'application/zip', 'public-read', s3)
.then(() => {
  return s3helpers.getUserMetadata(bucket, key, s3)
}).then(console.log, console.error)

161
scripts/binary/s3-api.ts Normal file
View File

@@ -0,0 +1,161 @@
const debug = require("debug")("cypress:binary")
import la from 'lazy-ass'
import is from 'check-more-types'
import S3 from 'aws-sdk/clients/s3'
import {prop, values, all} from 'ramda'
/**
 * Returns true if every own enumerable value of the given object
 * is a non-empty string. S3 user metadata may only carry string values,
 * so we use this to validate metadata objects before uploading.
 * Note: vacuously true for an empty object (same as the ramda `all` version).
 */
export const hasOnlyStringValues = (o: Record<string, unknown>): boolean =>
  Object.values(o).every((v) => typeof v === 'string' && v.length > 0)
/**
 * Utility object with methods that deal with S3.
 * Useful for testing our code that calls S3 methods.
 * Every method wraps a callback-style aws-sdk call in a Promise.
 */
export const s3helpers = {
  /**
   * Creates an S3 client from our credentials object.
   * @param aws object with non-empty string properties "key" and "secret"
   */
  makeS3 (aws) {
    la(is.unemptyString(aws.key), 'missing aws key')
    la(is.unemptyString(aws.secret), 'missing aws secret')
    return new S3({
      accessKeyId: aws.key,
      secretAccessKey: aws.secret
    })
  },
  /**
   * Verifies the given zip file exists in the bucket by issuing a
   * HEAD request. Resolves (with null) on success, rejects on error.
   */
  verifyZipFileExists (zipFile: string, bucket: string, s3: S3): Promise<null> {
    debug('checking S3 file %s', zipFile)
    debug('bucket %s', bucket)
    return new Promise((resolve, reject) => {
      s3.headObject({
        Bucket: bucket,
        Key: zipFile
      }, (err, data) => {
        if (err) {
          debug('error getting object %s', zipFile)
          debug(err)
          return reject(err)
        }
        debug('s3 data for %s', zipFile)
        debug(data)
        // resolve with an explicit null to satisfy the declared Promise<null>
        // (a bare resolve() is a type error under strictNullChecks)
        resolve(null)
      })
    })
  },
  /**
   * Returns list of prefixes in a given folder
   */
  listS3Objects (uploadDir: string, bucket: string, s3: S3): Promise<string[]> {
    la(is.unemptyString(uploadDir), 'invalid upload dir', uploadDir)
    return new Promise((resolve, reject) => {
      const prefix = uploadDir + '/'
      s3.listObjectsV2({
        Bucket: bucket,
        Prefix: prefix,
        Delimiter: '/'
      }, (err, result) => {
        if (err) {
          return reject(err)
        }
        debug('AWS result in %s %s', bucket, prefix)
        debug('%o', result)
        // CommonPrefixes is optional in the SDK types and absent when
        // nothing matches - default to an empty list instead of crashing
        resolve((result.CommonPrefixes || []).map(prop('Prefix')))
      })
    })
  },
  /**
   * Copies one S3 object into another key, metadata is copied.
   * For copying a public zip file use content 'application/zip'
   * and ACL 'public-read'
   */
  copyS3 (sourceKey: string, destinationKey: string, bucket: string,
    contentType: S3.ContentType, acl: S3.ObjectCannedACL,
    s3: S3): Promise<S3.CopyObjectOutput> {
    return new Promise((resolve, reject) => {
      debug('copying %s in bucket %s to %s', sourceKey, bucket, destinationKey)
      const params: S3.CopyObjectRequest = {
        Bucket: bucket,
        CopySource: bucket + '/' + sourceKey,
        Key: destinationKey,
        // when we copy S3 object, copy the original metadata, if any
        MetadataDirective: 'COPY',
        ContentType: contentType,
        // ACL is not preserved by copyObject, so set it explicitly
        ACL: acl
      }
      s3.copyObject(params, (err, data) => {
        if (err) {
          return reject(err)
        }
        debug('result of copying')
        debug('%o', data)
        resolve(data)
      })
    })
  },
  /**
   * Returns user metadata for the given S3 object.
   * Note: on S3 when adding user metadata, each key is prefixed with "x-amz-meta-"
   * but the returned object has these prefixes stripped. Thus if we set
   * a single "x-amz-meta-user: gleb", the resolved object will be simply {user: "gleb"}
   */
  getUserMetadata (bucket: string, key: string, s3: S3): Promise<S3.Metadata> {
    // previously the resolve callback was misspelled "resole"
    return new Promise((resolve, reject) => {
      debug('getting user metadata from %s %s', bucket, key)
      s3.headObject({
        Bucket: bucket,
        Key: key
      }, (err, data) => {
        if (err) {
          return reject(err)
        }
        debug('user metadata')
        debug('%o', data.Metadata)
        resolve(data.Metadata)
      })
    })
  },
  /**
   * Setting user metadata can be accomplished with copying the object back onto itself
   * with replaced metadata object.
   */
  setUserMetadata (bucket: string, key: string, metadata: S3.Metadata,
    contentType: S3.ContentType, acl: S3.ObjectCannedACL, s3: S3): Promise<S3.CopyObjectOutput> {
    la(hasOnlyStringValues(metadata),
      'metadata object can only have string values', metadata)
    return new Promise((resolve, reject) => {
      debug('setting metadata to %o for %s %s', metadata, bucket, key)
      const params: S3.CopyObjectRequest = {
        Bucket: bucket,
        // copy the object onto itself, only replacing its metadata
        CopySource: bucket + '/' + key,
        Key: key,
        Metadata: metadata,
        MetadataDirective: 'REPLACE',
        ContentType: contentType,
        ACL: acl
      }
      s3.copyObject(params, (err, data) => {
        if (err) {
          return reject(err)
        }
        debug('result of copying')
        debug('%o', data)
        resolve(data)
      })
    })
  }
}

View File

@@ -10,9 +10,11 @@ debug = require('gulp-debug')
gulp = require("gulp")
human = require("human-interval")
R = require("ramda")
hasha = require('hasha')
konfig = require('../binary/get-config')()
uploadUtils = require("./util/upload")
s3helpers = require("./s3-api").s3helpers
# we zip the binary on every platform and upload under same name
binaryExtension = ".zip"
@@ -70,6 +72,8 @@ uploadFile = (options) ->
headers = {}
headers["Cache-Control"] = "no-cache"
key = null
gulp.src(options.file)
.pipe rename (p) =>
p.basename = path.basename(uploadFileName, binaryExtension)
@@ -77,12 +81,37 @@ uploadFile = (options) ->
console.log("renaming upload to", p.dirname, p.basename)
la(check.unemptyString(p.basename), "missing basename")
la(check.unemptyString(p.dirname), "missing dirname")
key = p.dirname + uploadFileName
p
.pipe debug()
.pipe publisher.publish(headers)
.pipe awspublish.reporter()
.on "error", reject
.on "end", resolve
.on "end", () -> resolve(key)
# Computes the checksum and byte size of the uploaded binary file and
# stores them as user metadata on the already-uploaded S3 object.
# Returns the promise from s3helpers.setUserMetadata.
# NOTE(review): hasha.fromFileSync defaults to sha512, yet the log below
# says "SHA256" - confirm the intended algorithm (may need {algorithm: 'sha256'})
setChecksum = (filename, key) =>
  console.log('setting checksum for file %s', filename)
  console.log('on s3 object %s', key)
  la(check.unemptyString(filename), 'expected filename', filename)
  la(check.unemptyString(key), 'expected uploaded S3 key', key)
  checksum = hasha.fromFileSync(filename)
  size = fs.statSync(filename).size
  console.log('SHA256 checksum %s', checksum)
  console.log('size', size)
  aws = uploadUtils.getS3Credentials()
  s3 = s3helpers.makeS3(aws)
  # S3 object metadata can only have string values
  metadata = {
    checksum,
    size: String(size)
  }
  # by default s3.copyObject does not preserve ACL when copying
  # thus we need to reset it for our public files
  s3helpers.setUserMetadata(aws.bucket, key, metadata,
    'application/zip', 'public-read', s3)
uploadUniqueBinary = (args = []) ->
options = minimist(args, {
@@ -114,6 +143,8 @@ uploadUniqueBinary = (args = []) ->
options.platformArch = uploadUtils.getUploadNameByOsAndArch(platform)
uploadFile(options)
.then (key) ->
setChecksum(options.file, key)
.then () ->
cdnUrl = getCDN({
version: options.version,

View File

@@ -1,6 +1,6 @@
awspublish = require('gulp-awspublish')
rename = require('gulp-rename')
debug = require('gulp-debug')
awspublish = require('gulp-awspublish')
rename = require('gulp-rename')
debug = require('gulp-debug')
fs = require("fs-extra")
cp = require("child_process")
path = require("path")