feat: support test retries (#3968)

* add retries e2e test

* restore runner/test/eslintrc

* use mocha pass event, move runner.spec to runner package

* fix .eslintignore

* remove npmInstall logic in helper/e2e script, force custom reporters to use our mocha

* temp 04/09/20 [skip ci]

* add retries output to server/reporter, fix mocha pass event order, cleanup

* e2e tests - dont run electron tests in other browsers

* Update readme to reflect how to start server for cypress tests

* fix after merge

* fix .coffee -> .js after merge

* fix attempt.tsx

* fix runnable titles emitted to terminal reporters

* fix more tests: update snapshots, fix 7_record_spec, 8_reporters_spec

* remove styling for 'attempt-error-region' so it's not indented

- This was the older styling before error improvements and is no longer
necessary.

* try 2: fix rerun before/after hooks

* fix runner with only, runner snapshots, lint fixes

* temp 04/29/20 [skip ci]

* backport changes from test-retries

* change logic to rerun before hooks after top navigation

* fix windowSize for browser e2e test

* fix windowSize for xvfb chrome in e2e test

* ok fine, just disable screenshots

* fix after merge: decaffed navigation.js

* update server/unit test snapshots

* fix after merge: decaffed aliases.js

* fix usage of cypress --parallel flag in circle.yml

* fix circle.yml integration-tests jobs

* fix decaf related typo

* fix circle.yml separate command for runner-integration-tests

* update runner/integration tests + snapshot after error improvements

* fix runner/integration snapshots for chrome/ff stacktrace differences

* rerun ci

* fix passing --parallel to runner-integration tests

* perf: faster lookup for hooks without runnables

* fix afterAll hook switch logic

* simplify mocha prototype patches

* fix decaf utils.coffee after merge

* backport to before/after fix

* backport to before/after fix 2

* cleanup from decaf, fix ui/package.json

* update helpers, simplify runner.spec

* fix lint-types errors, flaky spec

* fix noExit passed to e2e test inline options

* cleanup snapshot utility - refactor to use util file

* remove before/after changes

* make cy obj a class instance

* cleanup/unmerge before/after fixes PR...

* more cleanup

* add comment

* fix runner.spec

* cleanup snapshot utility more, cleanup reporter.spec

* fix after merge

* minor rename variable

* fix after merge: decaffed files

* fix specName in reporterHeader, spec_helper require

* replace reporter specPath usages with spec object from config

* cleanup, fix specs, fix types tests

* fix config spec paths in isolated runner, fix snapshot plugin button

* combine runner.spec.js and runner_spec.js

* fix incorrect merge

* minor minor cleanup

* rename driver/test/cypress to driver/test

* use yarn workspace over lerna for individual package commands

* add error message to driver start

* remove usage of wait-on

* update <reference types/>, import string

* fix driver/readme

* fix readmes after regex replace

* revert wait-on changes

* Revert "revert wait-on changes"

This reverts commit 6de684cf34.

* update yarn.lock

* fix broken path in spec

* fix broken paths in specs with @packages/driver

* move runner/test/cypress into runner/cypress

* start server in pluginsFile in runner/cypress tests

* fix more broken spec paths

* fix broken paths after runner/cypress folder move

* move type definition loading for driver/cypress into dedicated file

* move internal-types to "types" folder, fix driver/index.d.ts

* fix type-check in packages/runner. not exactly sure why

* fix runner type-check by excluding test folder in tsconfig

* bump timeout on e2e/8_error_ui_spec

* update snapshot utility, rename tests in runner/runner.spec, fix README yarn commands

* delete old spec

* fix snapshot naming, remove redundant test in reporter_spec

* fix file renames after merge

* rename runner/ snapshot

* update server/unit/reporter_spec snapshot

* update runner/runner_spec snapshot

* rename runner snapshot file

* address feedback: move server reporter snapshot specs out

* address feedback: add comment about exposing globals

* fix test-retries after merging isolated-runner

* fix runner/test helper, update snapshot

* address feedback: split out runner/retries spec, move reporter/ui tests to runner/ui spec (mostly done), various cleanup

* fix scrolling, attempt opening, update snapshots

* fix e2e support file

* fix 5_spec_isolation

* fix mislabeling attempt screenshots

* only add test results prevAttempts if exists

* fix reporter/unit tests, server/unit tests

* remove dead code, fix test isOpen

* update snapshots for retries.mochaEvents, fix snapshot error in state hydration test, remove dead snapshots

* new moduleAPI schema using attempts array, fix wrapping errors from hook retries, update snapshots

* add displayError, null out fields in moduleAPI schema

* change default retries to {runMode:2, openMode:0}

* fix reporter type-check

* upgrade json-schemas, update snapshots

* reformat error.stack to be only stacktrace, update snapshots

* fix stacktrace replacing in 5_spec_isolation

* fix navigation error causing infinite reloading, bump timeout on e2e/8_error_ui

* fix server/unit tests for new schema

* fix reporter/unit tests

* fix reporting duplicate screenshots using cy.screenshot during test retry

* update snapshot for 6_uncaught_support_file_spec

* bump x-route-version: 3

* fix test.tsx collapsible content, css, fix e2e/8_error_ui, e2e projects excluding retries

* fix css, fix padding in runnable-instruments, fix runner/integration tests

* fixup after merge

* fix reporter/runner to work with split hooks

* update api tests, runner/cypress tests, reporter

* fix 5_spec_isolation snapshots, fix runner/cypress errors.spec, fix null reference in test.tsx

* fix e2e/non_root spec, fix type_check, fix reporter/unit tests

* setup percy snapshots in runner/cypress, fix driver/runner test:after:run event, add tests for only,skip in runner/cypress, fix retried css

* add customPercySnapshot

* fix circle.yml

* fix circle.yml 2

* fix circle.yml 3

* add warning for incompatible retries plugin

* add more percy snapshots

* fix firefox screenshot resolution in e2e test

* Fix testConfigOverrides not affecting viewport (#8006)

* finish adding percy snapshots to runner/cypress retries spec, update error msgs, add tests to be fixed

* remove .only

* fixing missing repo argument

* fix testConfigOverrides usage with retries, fix test

* fix issues from previous merge

* add script that can query CircleCI workflow status

* add circleci job to poll

* add retries

* try yarn lock

* retry, percy finalize

* check for current running job

* do not swallow request error

* better print

* use job name from circle environment

* use debug instead

* renamed circle polling script

* refactor circle to conditionally run percy-finalize when env var is available

- pass job-names to wait on as an argument

* use multi-line strings and quote --job-names

- rename `--circle-jobs` to `--job-names`

* add comment

* only poll until the jobs to wait for are blocked or running

* fix running hooks at correct depth after attempt fails from hook and will retry, update e2e snapshots

* fix reporter/unit tests, remove unused toggleOpen code

* move custom percy command into @packages/ui-components and apply them to desktop-gui

* halt percy finalize job if env variable is not set

* if only I could code

* update runner/cypress mochaEvent snapshots, fix e2e firefox resolution

* fix css for attempt border-left, fix attempt-tag open/close icon, add color to attempt collapsible dot

* try percy set viewport width

* set default retries back to {runMode:0, openMode:0}

* formatting: add backticks to warning message

* write explicit test for screenshot overwriting behavior, fix snapshots after changing retries defaults

* fix `e2e.it.only`

* cleanup whitespace

* update snapshots

* fix cypress module API types for new result schema

* build and upload binary for test-retries branch too (linux)

* add pre-release PR comment

* fix pre-release commit comment

* rename runner/cypress test

* update retries.ui.spec test titles

* fix after merge: use most recent attempt for before/after hooks

* add suite title to hook error in runner/cypress tests

Co-authored-by: Jennifer Shehane <jennifer@cypress.io>
Co-authored-by: Brian Mann <brian.mann86@gmail.com>
Co-authored-by: Gleb Bahmutov <gleb.bahmutov@gmail.com>
Ben Kucera, 2020-08-10 18:36:45 -04:00, committed by GitHub
parent 01cc5e96d0
commit 860a20af30
120 changed files with 12042 additions and 1818 deletions

View File

@@ -6,7 +6,8 @@
"plugin:@cypress/dev/general"
],
"rules": {
"prefer-spread": "off"
"prefer-spread": "off",
"prefer-rest-params": "off"
},
"settings": {
"react": {

View File

@@ -129,14 +129,22 @@ commands:
browser:
description: browser shortname to target
type: string
percy:
description: enable percy
type: boolean
default: false
steps:
- attach_workspace:
at: ~/
- run:
command: |
cmd=$([[ <<parameters.percy>> == 'true' ]] && echo 'yarn percy exec --') || true
CYPRESS_KONFIG_ENV=production \
CYPRESS_RECORD_KEY=$PACKAGES_RECORD_KEY \
yarn workspace @packages/runner cypress:run --record --parallel --group runner-integration-<<parameters.browser>> --browser <<parameters.browser>>
PERCY_PARALLEL_NONCE=$CIRCLE_WORKFLOW_ID \
PERCY_PARALLEL_TOTAL=-1 \
$cmd yarn workspace @packages/runner cypress:run --record --parallel --group runner-integration-<<parameters.browser>> --browser <<parameters.browser>>
- store_test_results:
path: /tmp/cypress
- store_artifacts:
@@ -211,6 +219,7 @@ commands:
command: |
git clone --depth 1 --no-single-branch https://github.com/cypress-io/<<parameters.repo>>.git /tmp/<<parameters.repo>>
cd /tmp/<<parameters.repo>> && (git checkout $NEXT_DEV_VERSION || true)
test-binary-against-repo:
description: |
Takes the built binary and NPM package, clones given example repo
@@ -337,6 +346,17 @@ commands:
path: /tmp/<<parameters.repo>>/cypress/videos
- store-npm-logs
wait-on-circle-jobs:
description: Polls certain Circle CI jobs until they finish
parameters:
job-names:
description: comma separated list of circle ci job names to wait for
type: string
steps:
- run:
name: "Waiting on Circle CI jobs: <<parameters.job-names>>"
command: node ./scripts/wait-on-circle-jobs.js --job-names="<<parameters.job-names>>"
jobs:
## code checkout and yarn installs
build:
@@ -400,6 +420,37 @@ jobs:
command: node cli/bin/cypress info --dev
- store-npm-logs
# a special job that keeps polling Circle and when all
# individual jobs are finished, it closes the Percy build
percy-finalize:
<<: *defaults
executor: cy-doc
parameters:
required_env_var:
type: env_var_name
steps:
- attach_workspace:
at: ~/
- run:
# if this is an external pull request, the environment variables
# are NOT set for security reasons, thus no need to poll -
# and no need to finalize Percy, since there will be no visual tests
name: Check if <<parameters.required_env_var>> is set
command: |
if [[ -v <<parameters.required_env_var>> ]]; then
echo "Internal PR, good to go"
else
echo "This is an external PR, cannot access other services"
circleci-agent step halt
fi
- wait-on-circle-jobs:
job-names: >
desktop-gui-integration-tests-2x,
desktop-gui-component-tests,
cli-visual-tests,
runner-integration-tests-chrome,
- run: npx percy finalize --all
cli-visual-tests:
<<: *defaults
parallelism: 1
@@ -701,6 +752,7 @@ jobs:
steps:
- run-runner-integration-tests:
browser: chrome
percy: true
runner-integration-tests-firefox:
<<: *defaults
@@ -879,13 +931,6 @@ jobs:
command: node index.js
working_directory: packages/launcher
percy-finalize:
<<: *defaults
steps:
- run:
name: "finalizes percy builds"
command: npx percy finalize --all
build-binary:
<<: *defaults
shell: /bin/bash --login
@@ -1369,6 +1414,11 @@ jobs:
command: npm run test:ci
pull_request_id: 515
folder: examples/fundamentals__typescript
- test-binary-against-repo:
repo: cypress-example-recipes
command: npm test
pull_request_id: 513
folder: examples/fundamentals__module-api-wrap
"test-binary-against-kitchensink":
<<: *defaults
@@ -1493,6 +1543,11 @@ linux-workflow: &linux-workflow
name: Linux lint
requires:
- build
- percy-finalize:
context: test-runner:poll-circle-workflow
required_env_var: PERCY_TOKEN # skips job if not defined (external PR)
requires:
- build
- lint-types:
requires:
- build
@@ -1624,12 +1679,6 @@ linux-workflow: &linux-workflow
requires:
- build
- percy-finalize:
requires:
- desktop-gui-integration-tests-2x
- desktop-gui-component-tests
- cli-visual-tests
# various testing scenarios, like building full binary
# and testing it on a real project
- test-against-staging:
@@ -1666,6 +1715,7 @@ linux-workflow: &linux-workflow
branches:
only:
- develop
- test-retries
requires:
- build-npm-package
- build-binary:
@@ -1677,6 +1727,7 @@ linux-workflow: &linux-workflow
branches:
only:
- develop
- test-retries
requires:
- build-binary
- test-npm-module-on-minimum-node-version:
@@ -1733,6 +1784,7 @@ linux-workflow: &linux-workflow
branches:
only:
- develop
- test-retries
requires:
- upload-npm-package
- upload-binary

View File

@@ -243,6 +243,18 @@
"type": "boolean",
"default": false,
"description": "Polyfills `window.fetch` to enable Network spying and stubbing"
},
"retries": {
"type": [
"object",
"number",
"null"
],
"default": {
"runMode": 0,
"openMode": 0
},
"description": "The number of times to retry a failing. Can be configured to apply only in runMode or openMode"
}
}
}
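
For orientation (not part of the diff): the shapes this schema permits map onto the per-test/per-suite syntax shown in the `manually_set_retries_*` error templates later in this commit. A minimal sketch — spec contents are illustrative:

```js
// A plain number retries in both runMode and openMode; an object splits the
// two modes; null (the default) disables retries entirely.
it('retries in both modes', { retries: 2 }, () => {
  // ...
})

describe('retries only in run mode', { retries: { runMode: 2, openMode: 0 } }, () => {
  it('flaky test', () => {
    // ...
  })
})
```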

View File

@@ -7,6 +7,11 @@
// but for now describe it as an ambient module
declare namespace CypressCommandLine {
interface TestError {
name: string
message: string
stack: string
}
/**
* All options that one can pass to "cypress.run"
* @see https://on.cypress.io/module-api#cypress-run
@@ -166,14 +171,16 @@ declare namespace CypressCommandLine {
title: string[]
state: string
body: string
/**
* Error stack string if there is an error
/**
* Error string as it's presented in console if the test fails
*/
stack: string | null
/**
* Error message if there is an error
*/
error: string | null
displayError: string | null
attempts: AttemptResult[]
}
interface AttemptResult {
state: string
error: TestError | null
timings: any
failedFromHookId: hookId | null
wallClockStartedAt: dateTimeISO
@@ -199,6 +206,7 @@ declare namespace CypressCommandLine {
name: string
testId: testId
takenAt: dateTimeISO
testAttemptIndex: number
/**
* Absolute path to the saved image
*/
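
A hedged sketch of consuming the new result schema through the module API (assuming the usual `cypress.run()` promise resolution; `runs[0].tests`, `attempts`, and `displayError` are taken from the declarations above):

```js
const cypress = require('cypress')

cypress.run({ spec: 'cypress/integration/example_spec.js' }).then((results) => {
  results.runs[0].tests.forEach((test) => {
    // each test now carries an attempts array; the last attempt is the final state
    const finalAttempt = test.attempts[test.attempts.length - 1]

    console.log(test.title.join(' > '), finalAttempt.state, test.displayError)
  })
})
```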

View File

@@ -334,6 +334,11 @@ declare namespace Cypress {
*/
getFirefoxGcInterval(): number | null | undefined
/**
* @returns the number of test retries currently enabled for the run
*/
getTestRetries(): number | null
/**
* Checks if a variable is a valid instance of `cy` or a `cy` chainable.
*
@@ -2563,6 +2568,13 @@ declare namespace Cypress {
* the `includeShadowDom` option to some DOM commands.
*/
experimentalShadowDomSupport: boolean
/**
* Number of times to retry a failed test.
* If a number is set, tests will retry in both runMode and openMode.
* To enable test retries only in runMode, set e.g. `{ openMode: null, runMode: 2 }`
* @default null
*/
retries: Nullable<number | {runMode: Nullable<number>, openMode: Nullable<number>}>
}
interface TestConfigOverrides extends Partial<Pick<ConfigOptions, 'baseUrl' | 'defaultCommandTimeout' | 'taskTimeout' | 'animationDistanceThreshold' | 'waitForAnimations' | 'viewportHeight' | 'viewportWidth' | 'requestTimeout' | 'execTimeout' | 'env' | 'responseTimeout'>> {

View File

@@ -136,6 +136,7 @@
"fs-extra": "8.1.0",
"gift": "0.10.2",
"globby": "10.0.1",
"got": "11.5.1",
"gulp": "4.0.2",
"gulp-awspublish": "4.0.0",
"gulp-debug": "4.0.0",

View File

@@ -1,4 +1,4 @@
require('@percy/cypress')
require('@packages/ui-components/cypress/support/customPercyCommand')
require('cypress-react-unit-test/dist/hooks')
const BluebirdPromise = require('bluebird')

View File

@@ -19,6 +19,7 @@ describe('src/cy/commands/screenshot', () => {
takenAt: new Date().toISOString(),
name: 'name',
blackout: ['.foo'],
testAttemptIndex: 0,
duration: 100,
}
@@ -49,7 +50,7 @@ describe('src/cy/commands/screenshot', () => {
Cypress.action('runner:runnable:after:run:async', test, runnable)
.then(() => {
expect(Cypress.action).not.to.be.calledWith('cy:test:set:state')
expect(Cypress.action).not.to.be.calledWith('test:set:state')
expect(Cypress.automation).not.to.be.called
})
.finally(() => {
@@ -68,7 +69,7 @@ describe('src/cy/commands/screenshot', () => {
Cypress.action('runner:runnable:after:run:async', test, runnable)
.then(() => {
expect(Cypress.action).not.to.be.calledWith('cy:test:set:state')
expect(Cypress.action).not.to.be.calledWith('test:set:state')
expect(Cypress.automation).not.to.be.called
})
})
@@ -89,7 +90,7 @@ describe('src/cy/commands/screenshot', () => {
Cypress.action('runner:runnable:after:run:async', test, runnable)
.then(() => {
expect(Cypress.action).not.to.be.calledWith('cy:test:set:state')
expect(Cypress.action).not.to.be.calledWith('test:set:state')
expect(Cypress.automation).not.to.be.called
})
})
@@ -137,6 +138,7 @@ describe('src/cy/commands/screenshot', () => {
waitForCommandSynchronization: true,
disableTimersAndAnimations: true,
blackout: [],
testAttemptIndex: 0,
})
expect(Cypress.action).to.be.calledWith('cy:after:screenshot', {
@@ -147,6 +149,7 @@ describe('src/cy/commands/screenshot', () => {
waitForCommandSynchronization: true,
disableTimersAndAnimations: true,
blackout: [],
testAttemptIndex: 0,
})
})
})
@@ -183,6 +186,7 @@ describe('src/cy/commands/screenshot', () => {
testFailure: true,
blackout: [],
scaled: true,
testAttemptIndex: 0,
})
})
})
@@ -225,6 +229,7 @@ describe('src/cy/commands/screenshot', () => {
simple: false,
scaled: true,
blackout: [],
testAttemptIndex: 0,
})
})
})
@@ -264,6 +269,7 @@ describe('src/cy/commands/screenshot', () => {
testFailure: true,
scaled: true,
blackout: [],
testAttemptIndex: 0,
})
})
})
@@ -406,6 +412,7 @@ describe('src/cy/commands/screenshot', () => {
waitForCommandSynchronization: false,
disableTimersAndAnimations: true,
blackout: ['.foo'],
testAttemptIndex: 0,
})
})
})
@@ -425,6 +432,7 @@ describe('src/cy/commands/screenshot', () => {
waitForCommandSynchronization: false,
disableTimersAndAnimations: true,
blackout: ['.foo'],
testAttemptIndex: 0,
})
})
})
@@ -446,6 +454,7 @@ describe('src/cy/commands/screenshot', () => {
waitForCommandSynchronization: true,
disableTimersAndAnimations: true,
blackout: [],
testAttemptIndex: 0,
})
})
})
@@ -466,6 +475,7 @@ describe('src/cy/commands/screenshot', () => {
waitForCommandSynchronization: false,
disableTimersAndAnimations: true,
blackout: ['.foo'],
testAttemptIndex: 0,
})
})
})

View File

@@ -18,7 +18,12 @@
"@cypress/what-is-circular": "1.0.1",
"@packages/network": "*",
"@packages/runner": "*",
"@packages/server": "*",
"@packages/ts": "*",
"@types/chalk": "^2.2.0",
"@types/common-tags": "^1.8.0",
"@types/lodash": "^4.14.123",
"@types/mocha": "^5.2.6",
"angular": "1.8.0",
"backbone": "1.4.0",
"basic-auth": "2.0.1",

View File

@@ -842,6 +842,9 @@ module.exports = (Commands, Cypress, cy, state, config) => {
if (previousDomainVisited && (remote.originPolicy !== existing.originPolicy)) {
// if we've already visited a new superDomain
// then die else we'd be in a terrible endless loop
// we also need to disable retries to prevent the endless loop
$utils.getTestFromRunnable(state('runnable'))._retries = 0
return cannotVisitDifferentOrigin(remote.origin, previousDomainVisited, remote, existing, options._log)
}

View File

@@ -7,6 +7,7 @@ const Promise = require('bluebird')
const $Screenshot = require('../../cypress/screenshot')
const $dom = require('../../dom')
const $errUtils = require('../../cypress/error_utils')
const $utils = require('../../cypress/utils')
const getViewportHeight = (state) => {
// TODO this doesn't seem correct
@@ -54,6 +55,7 @@ const automateScreenshot = (state, options = {}) => {
titles,
testId: runnable.id,
takenPaths: state('screenshotPaths'),
testAttemptIndex: $utils.getTestFromRunnable(runnable)._currentRetry,
}, _.omit(options, 'runnable', 'timeout', 'log', 'subject'))
const automate = () => {
@@ -304,6 +306,7 @@ const takeScreenshot = (Cypress, state, screenshotConfig, options = {}) => {
const getOptions = (isOpen) => {
return {
id: runnable.id,
testAttemptIndex: $utils.getTestFromRunnable(runnable)._currentRetry,
isOpen,
appOnly: isAppOnly(screenshotConfig),
scale: getShouldScale(screenshotConfig),

View File

@@ -165,6 +165,19 @@ class $Cypress {
this.config = $SetterGetter.create(config)
this.env = $SetterGetter.create(env)
this.getFirefoxGcInterval = $FirefoxForcedGc.createIntervalGetter(this.config)
this.getTestRetries = function () {
const testRetries = this.config('retries')
if (_.isNumber(testRetries)) {
return testRetries
}
if (_.isObject(testRetries)) {
return testRetries[this.config('isInteractive') ? 'openMode' : 'runMode']
}
return null
}
this.Cookies = $Cookies.create(config.namespace, d)
@@ -269,11 +282,6 @@ class $Cypress {
break
case 'runner:set:runnable':
// when there is a hook / test (runnable) that
// is about to be invoked
return this.cy.setRunnable(...args)
case 'runner:suite:start':
// mocha runner started processing a suite
if (this.config('isTextTerminal')) {
@@ -323,6 +331,8 @@ class $Cypress {
case 'runner:pass':
// mocha runner calculated a pass
// this is delayed from when mocha would normally fire it
// since we fire it after all afterEach hooks have ran
if (this.config('isTextTerminal')) {
return this.emit('mocha', 'pass', ...args)
}
@@ -359,6 +369,16 @@ class $Cypress {
break
}
// retry event only fired in mocha version 6+
// https://github.com/mochajs/mocha/commit/2a76dd7589e4a1ed14dd2a33ab89f182e4c4a050
case 'runner:retry': {
// mocha runner calculated a retry
if (this.config('isTextTerminal')) {
this.emit('mocha', 'retry', ...args)
}
break
}
case 'mocha:runnable:run':
return this.runner.onRunnableRun(...args)
@@ -367,7 +387,14 @@ class $Cypress {
// get back to a clean slate
this.cy.reset(...args)
return this.emit('test:before:run', ...args)
if (this.config('isTextTerminal')) {
// needed for handling test retries
this.emit('mocha', 'test:before:run', args[0])
}
this.emit('test:before:run', ...args)
break
case 'runner:test:before:run:async':
// TODO: handle timeouts here? or in the runner?

View File

@@ -1282,6 +1282,8 @@ const create = function (specWindow, Cypress, Cookies, state, config, log) {
state('runnable', runnable)
state('test', $utils.getTestFromRunnable(runnable))
state('ctx', runnable.ctx)
const { fn } = runnable

View File

@@ -884,6 +884,33 @@ module.exports = {
{{error}}`,
docsUrl: 'https://on.cypress.io/returning-promise-and-invoking-done-callback',
},
manually_set_retries_test: stripIndent`\
Mocha \`this.retries()\` syntax is not supported.
To configure retries use the following syntax:
\`\`\`
it('{{title}}', { retries: {{numRetries}} }, () => {
...
})
\`\`\`
https://on.cypress.io/test-retries
`,
manually_set_retries_suite: stripIndent`\
Mocha \`this.retries()\` syntax is not supported.
To configure retries use the following syntax:
\`\`\`
describe('{{title}}', { retries: {{numRetries}} }, () => {
...
})
\`\`\`
https://on.cypress.io/test-retries
`,
},
navigation: {
@@ -1595,6 +1622,10 @@ module.exports = {
msg += 'all of the remaining tests.'
}
if ((obj.hookName === 'after all' || obj.hookName === 'before all') && obj.retries > 0) {
msg += `\n\nAlthough you have test retries enabled, we do not retry tests when \`before all\` or \`after all\` hooks fail`
}
return msg
},
error (obj) {

View File

@@ -13,7 +13,7 @@ const $errUtils = require('./error_utils')
const groupsOrTableRe = /^(groups|table)$/
const parentOrChildRe = /parent|child/
const SNAPSHOT_PROPS = 'id snapshots $el url coords highlightAttr scrollBy viewportWidth viewportHeight'.split(' ')
const DISPLAY_PROPS = 'id alias aliasType callCount displayName end err event functionName hookId instrument isStubbed message method name numElements numResponses referencesAlias renderProps state testId timeout type url visible wallClockStartedAt'.split(' ')
const DISPLAY_PROPS = 'id alias aliasType callCount displayName end err event functionName hookId instrument isStubbed message method name numElements numResponses referencesAlias renderProps state testId timeout type url visible wallClockStartedAt testCurrentRetry'.split(' ')
const BLACKLIST_PROPS = 'snapshots'.split(' ')
let delay = null
@@ -90,10 +90,12 @@ const countLogsByTests = function (tests = {}) {
return _
.chain(tests)
.map((test, key) => {
return [].concat(test.agents, test.routes, test.commands)
}).flatten()
.compact()
.flatMap((test) => {
return [test, test.prevAttempts]
})
.flatMap((tests) => {
return [].concat(tests.agents, tests.routes, tests.commands)
}).compact()
.union([{ id: 0 }])
.map('id')
.max()
@@ -167,6 +169,16 @@ const defaults = function (state, config, obj) {
const runnable = state('runnable')
const getTestAttemptFromRunnable = (runnable) => {
if (!runnable) {
return
}
const t = $utils.getTestFromRunnable(runnable)
return t._currentRetry || 0
}
return _.defaults(obj, {
id: (counter += 1),
state: 'pending',
@@ -174,6 +186,7 @@ const defaults = function (state, config, obj) {
url: state('url'),
hookId: state('hookId'),
testId: runnable ? runnable.id : undefined,
testCurrentRetry: getTestAttemptFromRunnable(state('runnable')),
viewportWidth: state('viewportWidth'),
viewportHeight: state('viewportHeight'),
referencesAlias: undefined,

View File

@@ -1,5 +1,8 @@
/* eslint-disable prefer-rest-params */
const _ = require('lodash')
const $errUtils = require('./error_utils')
const { getTestFromRunnable } = require('./utils')
const $stackUtils = require('./stack_utils')
// in the browser mocha is coming back
@@ -7,13 +10,19 @@ const $stackUtils = require('./stack_utils')
const mocha = require('mocha')
const Mocha = mocha.Mocha != null ? mocha.Mocha : mocha
const { Test, Runner, Runnable } = Mocha
const { Test, Runner, Runnable, Hook, Suite } = Mocha
const runnerRun = Runner.prototype.run
const runnerFail = Runner.prototype.fail
const runnerRunTests = Runner.prototype.runTests
const runnableRun = Runnable.prototype.run
const runnableClearTimeout = Runnable.prototype.clearTimeout
const runnableResetTimeout = Runnable.prototype.resetTimeout
const testRetries = Test.prototype.retries
const testClone = Test.prototype.clone
const suiteAddTest = Suite.prototype.addTest
const suiteRetries = Suite.prototype.retries
const hookRetries = Hook.prototype.retries
// don't let mocha pollute the global namespace
delete window.mocha
@@ -241,6 +250,62 @@ const restoreRunnableRun = () => {
Runnable.prototype.run = runnableRun
}
const restoreSuiteRetries = () => {
Suite.prototype.retries = suiteRetries
}
function restoreTestClone () {
Test.prototype.clone = testClone
}
function restoreRunnerRunTests () {
Runner.prototype.runTests = runnerRunTests
}
function restoreSuiteAddTest () {
Mocha.Suite.prototype.addTest = suiteAddTest
}
const restoreHookRetries = () => {
Hook.prototype.retries = hookRetries
}
const patchSuiteRetries = () => {
Suite.prototype.retries = function (...args) {
if (args[0] !== undefined && args[0] > -1) {
const err = $errUtils.cypressErrByPath('mocha.manually_set_retries_suite', {
args: {
title: this.title,
numRetries: args[0] ?? 2,
},
})
throw err
}
return suiteRetries.apply(this, args)
}
}
const patchHookRetries = () => {
Hook.prototype.retries = function (...args) {
if (args[0] !== undefined && args[0] > -1) {
const err = $errUtils.cypressErrByPath('mocha.manually_set_retries_suite', {
args: {
title: this.parent.title,
numRetries: args[0] ?? 2,
},
})
// so this error doesn't cause a retry
getTestFromRunnable(this)._retries = -1
throw err
}
return hookRetries.apply(this, args)
}
}
// matching the current Runner.prototype.fail except
// changing the logic for determining whether this is a valid err
const patchRunnerFail = () => {
@@ -274,6 +339,50 @@ const patchRunnableRun = (Cypress) => {
}
}
function patchTestClone () {
Test.prototype.clone = function () {
if (this._retriesBeforeEachFailedTestFn) {
this.fn = this._retriesBeforeEachFailedTestFn
}
const ret = testClone.apply(this, arguments)
// carry over testConfigOverrides
ret.cfg = this.cfg
// carry over test.id
ret.id = this.id
return ret
}
}
function patchRunnerRunTests () {
Runner.prototype.runTests = function () {
const suite = arguments[0]
const _slice = suite.tests.slice
// HACK: we need to dynamically enqueue tests to suite.tests during a test run
// however Mocha calls `.slice` on this property and thus we no longer have a reference
// to the internal test queue. So we replace the .slice method
// in a way that we keep a reference to the returned array. we name it suite.testsQueue
suite.tests.slice = function () {
this.slice = _slice
const ret = _slice.apply(this, arguments)
suite.testsQueue = ret
return ret
}
const ret = runnerRunTests.apply(this, arguments)
return ret
}
}
const patchRunnableClearTimeout = () => {
Runnable.prototype.clearTimeout = function (...args) {
// call the original
@@ -283,6 +392,34 @@ const patchRunnableClearTimeout = () => {
}
}
function patchSuiteAddTest (Cypress) {
Mocha.Suite.prototype.addTest = function (...args) {
const test = args[0]
const ret = suiteAddTest.apply(this, args)
test.retries = function (...args) {
if (args[0] !== undefined && args[0] > -1) {
const err = $errUtils.cypressErrByPath('mocha.manually_set_retries_test', {
args: {
title: test.title,
numRetries: args[0] ?? 2,
},
})
// so this error doesn't cause a retry
test._retries = -1
throw err
}
return testRetries.apply(this, args)
}
return ret
}
}
const patchRunnableResetTimeout = () => {
Runnable.prototype.resetTimeout = function () {
const runnable = this
@@ -319,6 +456,11 @@ const restore = () => {
restoreRunnableRun()
restoreRunnableClearTimeout()
restoreRunnableResetTimeout()
restoreSuiteRetries()
restoreHookRetries()
restoreRunnerRunTests()
restoreTestClone()
restoreSuiteAddTest()
}
const override = (Cypress) => {
@@ -326,6 +468,11 @@ const override = (Cypress) => {
patchRunnableRun(Cypress)
patchRunnableClearTimeout()
patchRunnableResetTimeout()
patchSuiteRetries()
patchHookRetries()
patchRunnerRunTests()
patchTestClone()
patchSuiteAddTest(Cypress)
}
const create = (specWindow, Cypress, config) => {
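
With the `patchSuiteRetries`, `patchHookRetries`, and `patchSuiteAddTest` overrides above in place, Mocha's native retries API throws instead of silently configuring retries; a sketch of the user-facing effect (error text comes from the `manually_set_retries_*` templates earlier in this commit):

```js
describe('my suite', function () {
  // throws: Mocha `this.retries()` syntax is not supported.
  // To configure retries use: describe('my suite', { retries: 2 }, () => { ... })
  this.retries(2)

  it('my test', function () {
    // same error, with the it('my test', { retries: 3 }, () => { ... }) hint
    this.retries(3)
  })
})
```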

View File

@@ -18,7 +18,7 @@ const TEST_BEFORE_RUN_EVENT = 'runner:test:before:run'
const TEST_AFTER_RUN_EVENT = 'runner:test:after:run'
const RUNNABLE_LOGS = 'routes agents commands hooks'.split(' ')
const RUNNABLE_PROPS = 'id order title root hookName hookId err state failedFromHookId body speed type duration wallClockStartedAt wallClockDuration timings file originalTitle invocationDetails'.split(' ')
const RUNNABLE_PROPS = 'id order title root hookName hookId err state failedFromHookId body speed type duration wallClockStartedAt wallClockDuration timings file originalTitle invocationDetails final currentRetry retries'.split(' ')
const debug = require('debug')('cypress:driver:runner')
@@ -54,9 +54,8 @@ const runnableAfterRunAsync = (runnable, Cypress) => {
runnable.clearTimeout()
return Promise.try(() => {
if (!fired('runner:runnable:after:run:async', runnable)) {
return fire('runner:runnable:after:run:async', runnable, Cypress)
}
// NOTE: other events we do not fire more than once, but this needed to change for test-retries
return fire('runner:runnable:after:run:async', runnable, Cypress)
})
}
@@ -224,12 +223,14 @@ const findLastTestInSuite = (suite, fn = _.identity) => {
const getAllSiblingTests = (suite, getTestById) => {
const tests = []
suite.eachTest((test) => {
suite.eachTest((testRunnable) => {
// iterate through each of our suites tests.
// this will iterate through all nested tests
// as well. and then we add it only if its
// in our filtered tests array
if (getTestById(test.id)) {
const test = getTestById(testRunnable.id)
if (test) {
return tests.push(test)
}
})
@@ -291,7 +292,7 @@ const lastTestThatWillRunInSuite = (test, tests) => {
}
const isLastTest = (test, tests) => {
return test === _.last(tests)
return test.id === _.get(_.last(tests), 'id')
}
const isRootSuite = (suite) => {
@@ -308,73 +309,94 @@ const overrideRunnerHook = (Cypress, _runner, getTestById, getTest, setTest, get
// monkey patch the hook event so we can wrap
// 'test:after:run' around all of
// the hooks surrounding a test runnable
const _runnerHook = _runner.hook
// const _runnerHook = _runner.hook
_runner.hook = function (name, fn) {
const allTests = getTests()
const changeFnToRunAfterHooks = () => {
const originalFn = fn
const test = getTest()
fn = function () {
setTest(null)
testAfterRun(test, Cypress)
// and now invoke next(err)
return originalFn.apply(window, arguments)
}
_runner.hook = $utils.monkeypatchBefore(_runner.hook, function (name, fn) {
if (name !== 'afterAll' && name !== 'afterEach') {
return
}
const test = getTest()
const allTests = getTests()
let shouldFireTestAfterRun = _.noop
switch (name) {
case 'afterEach': {
const t = getTest()
case 'afterEach':
shouldFireTestAfterRun = () => {
// find all of the grep'd tests which share
// the same parent suite as our current test
const tests = getAllSiblingTests(test.parent, getTestById)
// find all of the filtered _tests which share
// the same parent suite as our current _test
const tests = getAllSiblingTests(t.parent, getTestById)
if (this.suite.root) {
_runner._shouldBufferSuiteEnd = true
// make sure this test isnt the last test overall but also
// isnt the last test in our filtered parent suite's tests array
if (this.suite.root && (t !== _.last(allTests)) && (t !== _.last(tests))) {
changeFnToRunAfterHooks()
}
break
}
case 'afterAll': {
// find all of the filtered allTests which share
// the same parent suite as our current _test
const t = getTest()
if (t) {
const siblings = getAllSiblingTests(t.parent, getTestById)
// 1. if we're the very last test in the entire allTests
// we wait until the root suite fires
// 2. else if we arent the last nested suite we fire if we're
// the last test that will run
if (
(isRootSuite(this.suite) && isLastTest(t, allTests)) ||
(!isLastSuite(this.suite, allTests) && lastTestThatWillRunInSuite(t, siblings))
) {
changeFnToRunAfterHooks()
// make sure this test isnt the last test overall but also
// isnt the last test in our filtered parent suite's tests array
if (test.final === false || (test !== _.last(allTests)) && (test !== _.last(tests))) {
return true
}
}
}
break
case 'afterAll':
shouldFireTestAfterRun = () => {
// find all of the filtered allTests which share
// the same parent suite as our current _test
// const t = getTest()
if (test) {
const siblings = getAllSiblingTests(test.parent, getTestById)
// 1. if we're the very last test in the entire allTests
// we wait until the root suite fires
// 2. else if we arent the last nested suite we fire if we're
// the last test that will run
if (
(isRootSuite(this.suite) && isLastTest(test, allTests)) ||
(!isLastSuite(this.suite, allTests) && lastTestThatWillRunInSuite(test, siblings))
) {
return true
}
}
}
break
}
default:
break
}
return _runnerHook.call(this, name, fn)
}
const newArgs = [name, $utils.monkeypatchBefore(fn,
function () {
if (!shouldFireTestAfterRun()) return
setTest(null)
if (test.final !== false) {
test.final = true
if (test.state === 'passed') {
Cypress.action('runner:pass', wrap(test))
}
Cypress.action('runner:test:end', wrap(test))
_runner._shouldBufferSuiteEnd = false
_runner._onTestAfterRun.map((fn) => {
return fn()
})
_runner._onTestAfterRun = []
}
testAfterRun(test, Cypress)
})]
return newArgs
})
}
const getTestResults = (tests) => {
@@ -443,8 +465,6 @@ const normalizeAll = (suite, initialTests = {}, setTestsById, setTests, onRunnab
const normalize = (runnable, tests, initialTests, onRunnable, onLogsById, getTestId, getHookId) => {
const normalizeRunnable = (runnable) => {
let i
runnable.id = getTestId()
// tests have a type of 'test' whereas suites do not have a type property
@@ -458,8 +478,27 @@ const normalize = (runnable, tests, initialTests, onRunnable, onLogsById, getTes
// if we have a runnable in the initial state
// then merge in existing properties into the runnable
i = initialTests[runnable.id]
const i = initialTests[runnable.id]
let prevAttempts
if (i) {
prevAttempts = []
if (i.prevAttempts) {
prevAttempts = _.map(i.prevAttempts, (test) => {
if (test) {
_.each(RUNNABLE_LOGS, (type) => {
return _.each(test[type], onLogsById)
})
}
// reduce this runnable down to its props
// and collections
return wrapAll(test)
})
}
_.each(RUNNABLE_LOGS, (type) => {
return _.each(i[type], onLogsById)
})
@@ -472,7 +511,13 @@ const normalize = (runnable, tests, initialTests, onRunnable, onLogsById, getTes
// reduce this runnable down to its props
// and collections
return wrapAll(runnable)
const test = wrapAll(runnable)
if (prevAttempts) {
test.prevAttempts = prevAttempts
}
return test
}
const push = (test) => {
@@ -506,7 +551,7 @@ const normalize = (runnable, tests, initialTests, onRunnable, onLogsById, getTes
normalizedSuite.tests = _.map(suite._onlyTests, (test) => {
const normalizedTest = normalizeRunnable(test, initialTests, onRunnable, onLogsById, getTestId, getHookId)
push(normalizedTest)
push(test)
return normalizedTest
})
@@ -541,17 +586,13 @@ const normalize = (runnable, tests, initialTests, onRunnable, onLogsById, getTes
return normalizedRunnable
}
const hookFailed = (hook, err, hookName, getTest, getTestFromHookOrFindTest) => {
const hookFailed = (hook, err, getTest, getTestFromHookOrFindTest) => {
// NOTE: sometimes mocha will fail a hook without having emitted on('hook')
// event, so this hook might not have currentTest set correctly
// in which case we need to lookup the test
const test = getTest() || getTestFromHookOrFindTest(hook)
test.err = err
test.state = 'failed'
test.duration = hook.duration // TODO: nope (?)
test.hookName = hookName // TODO: why are we doing this?
test.failedFromHookId = hook.hookId
setHookFailureProps(test, hook, err)
if (hook.alreadyEmittedMocha) {
test.alreadyEmittedMocha = true
@@ -560,6 +601,17 @@ const hookFailed = (hook, err, hookName, getTest, getTestFromHookOrFindTest) =>
}
}
const setHookFailureProps = (test, hook, err) => {
err = $errUtils.wrapErr(err)
const hookName = getHookName(hook)
test.err = err
test.state = 'failed'
test.duration = hook.duration // TODO: nope (?)
test.hookName = hookName // TODO: why are we doing this?
test.failedFromHookId = hook.hookId
}
function getTestFromRunnable (runnable) {
switch (runnable.type) {
case 'hook':
@@ -594,18 +646,31 @@ const _runnerListeners = (_runner, Cypress, _emissions, getTestById, getTest, se
return Cypress.action('runner:suite:start', wrap(suite))
})
_runner.on('suite end', (suite) => {
// cleanup our suite + its hooks
forceGc(suite)
eachHookInSuite(suite, forceGc)
_runner._shouldBufferSuiteEnd = false
_runner._onTestAfterRun = []
_runner.on('suite end', (suite) => {
const handleSuiteEnd = () => {
// cleanup our suite + its hooks
forceGc(suite)
eachHookInSuite(suite, forceGc)
if (_emissions.ended[suite.id]) {
return
}
_emissions.ended[suite.id] = true
Cypress.action('runner:suite:end', wrap(suite))
}
if (_runner._shouldBufferSuiteEnd) {
_runner._onTestAfterRun = _runner._onTestAfterRun.concat([handleSuiteEnd])
if (_emissions.ended[suite.id]) {
return
}
_emissions.ended[suite.id] = true
return Cypress.action('runner:suite:end', wrap(suite))
return handleSuiteEnd()
})
_runner.on('hook', (hook) => {
@@ -665,11 +730,22 @@ const _runnerListeners = (_runner, Cypress, _emissions, getTestById, getTest, se
_emissions.ended[test.id] = true
return Cypress.action('runner:test:end', wrap(test))
// NOTE: we wait to send 'test end' until after hooks run
// return Cypress.action('runner:test:end', wrap(test))
})
_runner.on('pass', (test) => {
return Cypress.action('runner:pass', wrap(test))
// Ignore the 'pass' event since we emit our own
// _runner.on('pass', (test) => {
// return Cypress.action('runner:pass', wrap(test))
// })
/**
* Mocha retry event is only fired in Mocha version 6+
* https://github.com/mochajs/mocha/commit/2a76dd7589e4a1ed14dd2a33ab89f182e4c4a050
*/
_runner.on('retry', (test, err) => {
test.err = $errUtils.wrapErr(err)
Cypress.action('runner:retry', wrap(test), test.err)
})
// if a test is pending mocha will only
@@ -702,11 +778,13 @@ const _runnerListeners = (_runner, Cypress, _emissions, getTestById, getTest, se
const tests = getAllSiblingTests(test.parent, getTestById)
if (_.last(tests) !== test) {
test.final = true
return fire(TEST_AFTER_RUN_EVENT, test, Cypress)
}
})
return _runner.on('fail', (runnable, err) => {
_runner.on('fail', (runnable, err) => {
let hookName
const isHook = runnable.type === 'hook'
@@ -716,6 +794,7 @@ const _runnerListeners = (_runner, Cypress, _emissions, getTestById, getTest, se
const parentTitle = runnable.parent.title
hookName = getHookName(runnable)
const test = getTest() || getTestFromHookOrFindTest(runnable)
// append a friendly message to the error indicating
// we're skipping the remaining tests in this suite
@@ -724,6 +803,7 @@ const _runnerListeners = (_runner, Cypress, _emissions, getTestById, getTest, se
$errUtils.errByPath('uncaught.error_in_hook', {
parentTitle,
hookName,
retries: test._retries,
}).message,
)
}
@@ -751,7 +831,7 @@ const _runnerListeners = (_runner, Cypress, _emissions, getTestById, getTest, se
// if a hook fails (such as a before) then the test will never
// get run and we'll need to make sure we set the test so that
// the TEST_AFTER_RUN_EVENT fires correctly
return hookFailed(runnable, runnable.err, hookName, getTest, getTestFromHookOrFindTest)
return hookFailed(runnable, runnable.err, getTest, getTestFromHookOrFindTest)
}
})
}
@@ -779,13 +859,18 @@ const create = (specWindow, mocha, Cypress, cy) => {
const suite = hook.parent
let foundTest
if (hook.hookName === 'after all') {
return findLastTestInSuite(suite, isNotAlreadyRunTest)
foundTest = findLastTestInSuite(suite, isNotAlreadyRunTest)
} else if (hook.hookName === 'before all') {
foundTest = findTestInSuite(suite, isNotAlreadyRunTest)
}
if (hook.hookName === 'before all') {
return findTestInSuite(suite, isNotAlreadyRunTest)
}
// if test has retried, we getTestById will give us the last attempt
foundTest = foundTest && getTestById(foundTest.id)
return foundTest
}
const onScriptError = (err) => {
@@ -837,6 +922,7 @@ const create = (specWindow, mocha, Cypress, cy) => {
let _testsById = {}
const _testsQueue = []
const _testsQueueById = {}
// only used during normalization
const _runnables = []
const _logsById = {}
let _emissions = {
@@ -867,6 +953,7 @@ const create = (specWindow, mocha, Cypress, cy) => {
}
const onRunnable = (r) => {
// set default retries at onRunnable time instead of onRunnableRun
return _runnables.push(r)
}
@@ -891,8 +978,103 @@ const create = (specWindow, mocha, Cypress, cy) => {
return _testsById[id]
}
const replaceRunnable = (runnable, id) => {
const testsQueueIndex = _.findIndex(_testsQueue, { id })
_testsQueue.splice(testsQueueIndex, 1, runnable)
_testsQueueById[id] = runnable
const testsIndex = _.findIndex(_tests, { id })
_tests.splice(testsIndex, 1, runnable)
_testsById[id] = runnable
}
overrideRunnerHook(Cypress, _runner, getTestById, getTest, setTest, getTests)
// this forces mocha to enqueue a duplicate test in the case of test retries
const replacePreviousAttemptWith = (test) => {
const prevAttempt = _testsById[test.id]
const prevAttempts = prevAttempt.prevAttempts || []
const newPrevAttempts = prevAttempts.concat([prevAttempt])
delete prevAttempt.prevAttempts
test.prevAttempts = newPrevAttempts
replaceRunnable(test, test.id)
}
const maybeHandleRetry = (runnable, err) => {
const r = runnable
const isHook = r.type === 'hook'
const isTest = r.type === 'test'
const test = getTest() || getTestFromHook(runnable, getTestById)
const isBeforeEachHook = isHook && !!r.hookName.match(/before each/)
const isAfterEachHook = isHook && !!r.hookName.match(/after each/)
const retryAbleRunnable = isTest || isBeforeEachHook || isAfterEachHook
const willRetry = (test._currentRetry < test._retries) && retryAbleRunnable
const fail = function () {
return err
}
const noFail = function () {
return
}
if (err) {
if (willRetry) {
test.state = 'failed'
test.final = false
}
if (willRetry && isBeforeEachHook) {
delete runnable.err
test._retriesBeforeEachFailedTestFn = test.fn
// this prevents afterEach hooks that exist at a deeper level than the failing one from running
// we will always skip remaining beforeEach hooks since they will always be same level or deeper
test._skipHooksWithLevelGreaterThan = runnable.titlePath().length
setHookFailureProps(test, runnable, err)
test.fn = function () {
throw err
}
return noFail()
}
if (willRetry && isAfterEachHook) {
// if we've already failed this attempt from an afterEach hook then we've already enqueued another attempt
// so return early
if (test._retriedFromAfterEachHook) {
return noFail()
}
setHookFailureProps(test, runnable, err)
const newTest = test.clone()
newTest._currentRetry = test._currentRetry + 1
test.parent.testsQueue.unshift(newTest)
// this prevents afterEach hooks that exist at a deeper (or same) level than the failing one from running
test._skipHooksWithLevelGreaterThan = runnable.titlePath().length - 1
test._retriedFromAfterEachHook = true
Cypress.action('runner:retry', wrap(test), test.err)
return noFail()
}
}
return fail()
}
return {
onScriptError,
@@ -961,6 +1143,12 @@ const create = (specWindow, mocha, Cypress, cy) => {
return _next()
}
// first time seeing a retried test
// that hasn't already replaced our test
if (test._currentRetry > 0 && _testsById[test.id] !== test) {
replacePreviousAttemptWith(test)
}
// closure for calculating the actual
// runtime of a runnable's fn execution duration
// and also the run of the runnable:after:run:async event
@@ -998,6 +1186,27 @@ const create = (specWindow, mocha, Cypress, cy) => {
// associated _runnables will share this state
if (!fired(TEST_BEFORE_RUN_EVENT, test)) {
fire(TEST_BEFORE_RUN_EVENT, test, Cypress)
// this is the earliest we can set test._retries since test:before:run
// will load in testConfigOverrides (per test configuration)
const retries = Cypress.getTestRetries() ?? -1
test._retries = retries
}
const isHook = runnable.type === 'hook'
const isAfterEachHook = isHook && runnable.hookName.match(/after each/)
const isBeforeEachHook = isHook && runnable.hookName.match(/before each/)
// if we've been told to skip hooks at a certain nested level
// this happens if we're handling a runnable that is going to retry due to failing in a hook
const shouldSkipRunnable = test._skipHooksWithLevelGreaterThan != null
&& isHook
&& (isBeforeEachHook || isAfterEachHook && runnable.titlePath().length > test._skipHooksWithLevelGreaterThan)
if (shouldSkipRunnable) {
return _next.call(this)
}
const next = (err) => {
@@ -1038,7 +1247,7 @@ const create = (specWindow, mocha, Cypress, cy) => {
break
}
return _next(err)
return _next.call(runnable, err)
}
const onNext = (err) => {
@@ -1058,6 +1267,8 @@ const create = (specWindow, mocha, Cypress, cy) => {
runnable.err = $errUtils.wrapErr(err)
}
err = maybeHandleRetry(runnable, err)
return runnableAfterRunAsync(runnable, Cypress)
.then(() => {
// once we complete callback with the
@@ -1163,21 +1374,13 @@ const create = (specWindow, mocha, Cypress, cy) => {
// search through all of the tests
// until we find the current test
// and break then
for (let test of _tests) {
if (test.id === id) {
for (let testRunnable of _tests) {
if (testRunnable.id === id) {
break
} else {
test = wrapAll(test)
const test = serializeTest(testRunnable)
_.each(RUNNABLE_LOGS, (type) => {
let logs
logs = test[type]
if (logs) {
test[type] = _.map(logs, $Log.toSerializedJSON)
}
})
test.prevAttempts = _.map(testRunnable.prevAttempts, serializeTest)
tests[test.id] = test
}
@@ -1210,9 +1413,7 @@ const create = (specWindow, mocha, Cypress, cy) => {
getDisplayPropsForLog: $Log.getDisplayProps,
getConsolePropsForLogById (logId) {
let attrs
attrs = _logsById[logId]
const attrs = _logsById[logId]
if (attrs) {
return $Log.getConsoleProps(attrs)
@@ -1220,25 +1421,13 @@ const create = (specWindow, mocha, Cypress, cy) => {
},
getSnapshotPropsForLogById (logId) {
let attrs
attrs = _logsById[logId]
const attrs = _logsById[logId]
if (attrs) {
return $Log.getSnapshotProps(attrs)
}
},
getErrorByTestId (testId) {
let test
test = getTestById(testId)
if (test) {
return $errUtils.wrapErr(test.err)
}
},
resumeAtTest (id, emissions = {}) {
_resumedAtTestIndex = getTestIndexFromId(id)
@@ -1287,7 +1476,6 @@ const create = (specWindow, mocha, Cypress, cy) => {
// we dont need to hold a log reference
// to anything in memory when we're headless
// because you cannot inspect any logs
let existing
if (!isInteractive) {
return
@@ -1308,7 +1496,7 @@ const create = (specWindow, mocha, Cypress, cy) => {
_testsQueue.push(test)
}
existing = _logsById[attrs.id]
const existing = _logsById[attrs.id]
if (existing) {
// because log:state:changed may
@@ -1343,6 +1531,24 @@ const create = (specWindow, mocha, Cypress, cy) => {
}
}
const mixinLogs = (test) => {
_.each(RUNNABLE_LOGS, (type) => {
const logs = test[type]
if (logs) {
test[type] = _.map(logs, $Log.toSerializedJSON)
}
})
}
const serializeTest = (test) => {
const wrappedTest = wrapAll(test)
mixinLogs(wrappedTest)
return wrappedTest
}
module.exports = {
create,
}

View File

@@ -3,6 +3,7 @@ const { codeFrameColumns } = require('@babel/code-frame')
const errorStackParser = require('error-stack-parser')
const path = require('path')
const { getStackLines, replacedStack, stackWithoutMessage, splitStack, unsplitStack } = require('@packages/server/lib/util/stack_utils')
const $sourceMapUtils = require('./source_map_utils')
const $utils = require('./utils')
@@ -11,36 +12,6 @@ const stackLineRegex = /^\s*(at )?.*@?\(?.*\:\d+\:\d+\)?$/
const customProtocolRegex = /^[^:\/]+:\/+/
const STACK_REPLACEMENT_MARKER = '__stackReplacementMarker'
// returns tuple of [message, stack]
const splitStack = (stack) => {
const lines = stack.split('\n')
return _.reduce(lines, (memo, line) => {
if (memo.messageEnded || stackLineRegex.test(line)) {
memo.messageEnded = true
memo[1].push(line)
} else {
memo[0].push(line)
}
return memo
}, [[], []])
}
const unsplitStack = (messageLines, stackLines) => {
return _.castArray(messageLines).concat(stackLines).join('\n')
}
const getStackLines = (stack) => {
const [, stackLines] = splitStack(stack)
return stackLines
}
const stackWithoutMessage = (stack) => {
return getStackLines(stack).join('\n')
}
const hasCrossFrameStacks = (specWindow) => {
// get rid of the top lines since they naturally have different line:column
const normalize = (stack) => {
@@ -323,17 +294,6 @@ const normalizedUserInvocationStack = (userInvocationStack) => {
return normalizeStackIndentation(winnowedStackLines)
}
const replacedStack = (err, newStack) => {
// if err already lacks a stack or we've removed the stack
// for some reason, keep it stackless
if (!err.stack) return err.stack
const errString = err.toString()
const stackLines = getStackLines(newStack)
return unsplitStack(errString, stackLines)
}
module.exports = {
getCodeFrame,
getSourceStack,

View File

@@ -55,6 +55,18 @@ module.exports = {
return console.log(...msgs)
},
monkeypatchBefore (origFn, fn) {
return function () {
const newArgs = fn.apply(this, arguments)
if (newArgs !== undefined) {
return origFn.apply(this, newArgs)
}
return origFn.apply(this, arguments)
}
},
unwrapFirst (val) {
// this method returns the first item in an array
// and if its still a jquery object, then we return
@@ -89,7 +101,7 @@ module.exports = {
return _.reduce(props, (memo, prop) => {
if (_.has(obj, prop) || obj[prop] !== undefined) {
memo[prop] = obj[prop]
memo[prop] = _.result(obj, prop)
}
return memo
@@ -297,6 +309,10 @@ module.exports = {
return Math.sqrt((deltaX * deltaX) + (deltaY * deltaY))
},
getTestFromRunnable (r) {
return r.ctx.currentTest || r
},
memoize (func, cacheInstance = new Map()) {
const memoized = function (...args) {
const key = args[0]
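
The `monkeypatchBefore` helper added above is generic enough to deserve a usage sketch: the injected function runs first and may replace the original arguments by returning a new argument array, while returning `undefined` leaves them untouched. Names and the import path below are illustrative:

```js
const { monkeypatchBefore } = require('./utils') // hypothetical import path

const greet = (name) => console.log(`hello ${name}`)

const patched = monkeypatchBefore(greet, function (name) {
  // returning an array swaps the arguments passed to the original fn
  if (name === 'world') return ['WORLD']
  // returning undefined keeps the original arguments
})

patched('world') // hello WORLD
patched('dev')   // hello dev
```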

View File

@@ -10,6 +10,7 @@ const addLog = function (runner, log) {
renderProps: {},
state: 'passed',
testId: 'r3',
testCurrentRetry: 0,
type: 'parent',
url: 'http://example.com',
}

View File

@@ -90,7 +90,7 @@ describe('controls', function () {
// need to add an input since this environment is isolated
$body.append('<input id="temp-input" />')
})
.get('#temp-input').type('r')
.get('#temp-input').type('r', { force: true })
.then(() => {
expect(runner.emit).not.to.have.been.calledWith('runner:restart')
})

View File

@@ -0,0 +1,29 @@
const _ = Cypress._
const sendLog = (runner, log, event) => {
const defaultLog = {
event: false,
hookName: 'test',
id: _.uniqueId('l'),
instrument: 'command',
renderProps: {},
state: 'passed',
testId: 'r3',
type: 'parent',
url: 'http://example.com',
}
runner.emit(event, _.extend(defaultLog, log))
}
export const updateLog = (runner, log) => {
sendLog(runner, log, 'reporter:log:state:changed')
}
export const addLog = (runner, log) => {
sendLog(runner, log, 'reporter:log:add')
}
export const addLogs = (runner, logs) => {
_.forEach(logs, addLog.bind(null, runner))
}

View File

@@ -0,0 +1,201 @@
import _ from 'lodash'
import { action, computed, observable } from 'mobx'
import Agent, { AgentProps } from '../agents/agent-model'
import Command, { CommandProps } from '../commands/command-model'
import Err from '../errors/err-model'
import Route, { RouteProps } from '../routes/route-model'
import Test, { UpdatableTestProps, TestProps, TestState } from '../test/test-model'
import Hook, { HookName } from '../hooks/hook-model'
import { FileDetails } from '@packages/ui-components'
import { LogProps } from '../runnables/runnables-store'
import Log from '../instruments/instrument-model'
export default class Attempt {
@observable agents: Agent[] = []
@observable commands: Command[] = []
@observable err = new Err({})
@observable hooks: Hook[] = []
// TODO: make this an enum with states: 'QUEUED, ACTIVE, INACTIVE'
@observable isActive: boolean | null = null
@observable routes: Route[] = []
@observable _state?: TestState | null = null
@observable _invocationCount: number = 0
@observable invocationDetails?: FileDetails
@observable hookCount: { [name in HookName]: number } = {
'before all': 0,
'before each': 0,
'after all': 0,
'after each': 0,
'test body': 0,
}
@observable _isOpen: boolean|null = null
@observable isOpenWhenLast: boolean | null = null
_callbackAfterUpdate: Function | null = null
testId: string
@observable id: number
test: Test
_logs: {[key: string]: Log} = {}
constructor (props: TestProps, test: Test) {
this.testId = props.id
this.id = props.currentRetry || 0
this.test = test
this._state = props.state
this.err.update(props.err)
this.invocationDetails = props.invocationDetails
this.hooks = _.map(props.hooks, (hook) => new Hook(hook))
_.each(props.agents, this.addLog)
_.each(props.commands, this.addLog)
_.each(props.routes, this.addLog)
}
@computed get hasCommands () {
return !!this.commands.length
}
@computed get isLongRunning () {
return this.isActive && this._hasLongRunningCommand
}
@computed get _hasLongRunningCommand () {
return _.some(this.commands, (command) => {
return command.isLongRunning
})
}
@computed get state () {
return this._state || (this.isActive ? 'active' : 'processing')
}
@computed get isLast () {
return this.id === this.test.lastAttempt.id
}
@computed get isOpen () {
if (this._isOpen !== null) {
return this._isOpen
}
// prev attempts open by default while test is running, otherwise only the last is open
return this.test.isActive || this.isLast
}
addLog = (props: LogProps) => {
switch (props.instrument) {
case 'command': {
return this._addCommand(props as CommandProps)
}
case 'agent': {
return this._addAgent(props as AgentProps)
}
case 'route': {
return this._addRoute(props as RouteProps)
}
default: {
throw new Error(`Attempted to add log for unknown instrument: ${props.instrument}`)
}
}
}
updateLog (props: LogProps) {
const log = this._logs[props.id]
if (log) {
log.update(props)
}
}
commandMatchingErr () {
return _(this.hooks)
.map((hook) => {
return hook.commandMatchingErr(this.err)
})
.compact()
.last()
}
@action start () {
this.isActive = true
}
@action update (props: UpdatableTestProps) {
if (props.state) {
this._state = props.state
}
this.err.update(props.err)
if (props.hookId) {
const hook = _.find(this.hooks, { hookId: props.hookId })
if (hook && props.err) {
hook.failed = true
}
}
if (props.isOpen != null) {
this.isOpenWhenLast = props.isOpen
}
}
@action finish (props: UpdatableTestProps) {
this.update(props)
this.isActive = false
}
_addAgent (props: AgentProps) {
const agent = new Agent(props)
this._logs[props.id] = agent
this.agents.push(agent)
return agent
}
_addRoute (props: RouteProps) {
const route = new Route(props)
this._logs[props.id] = route
this.routes.push(route)
return route
}
_addCommand (props: CommandProps) {
const command = new Command(props)
this._logs[props.id] = command
this.commands.push(command)
const hookIndex = _.findIndex(this.hooks, { hookId: command.hookId })
const hook = this.hooks[hookIndex]
hook.addCommand(command)
// make sure that hooks are in order of invocation
if (hook.invocationOrder === undefined) {
hook.invocationOrder = this._invocationCount++
if (hook.invocationOrder !== hookIndex) {
this.hooks[hookIndex] = this.hooks[hook.invocationOrder]
this.hooks[hook.invocationOrder] = hook
}
}
// assign number if non-existent
if (hook.hookNumber === undefined) {
hook.hookNumber = ++this.hookCount[hook.hookName]
}
return command
}
}
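
The invocation-order branch in _addCommand above is the subtle part of this model, so here is a standalone sketch of the same idea (plain objects instead of the Hook model; recordInvocation is a hypothetical name):

// stand-in for the hook model with only the fields the sketch needs
interface HookStub {
  hookId: string
  invocationOrder?: number
}

let invocationCount = 0

const recordInvocation = (hooks: HookStub[], hookId: string) => {
  const hookIndex = hooks.findIndex((hook) => hook.hookId === hookId)
  const hook = hooks[hookIndex]

  // the first command seen for a hook stamps it with the next order and
  // swaps it into that slot, so hooks end up sorted by when they ran
  if (hook.invocationOrder === undefined) {
    hook.invocationOrder = invocationCount++

    if (hook.invocationOrder !== hookIndex) {
      hooks[hookIndex] = hooks[hook.invocationOrder]
      hooks[hook.invocationOrder] = hook
    }
  }
}

// e.g. hooks declared [h1, h2], but a command for h2 arrives first:
// recordInvocation(hooks, 'h2') moves h2 to index 0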


@@ -0,0 +1,150 @@
.reporter {
.attempts {
.attempt-item > .collapsible > .collapsible-header-wrapper {
display: none;
}
&.has-multiple-attempts .attempt-item > .collapsible > .collapsible-header-wrapper {
display: flex;
}
}
.attempt-item {
margin-bottom: 7px;
> .collapsible {
position: relative;
margin-right: 20px;
.collapsible-header-inner {
outline: none;
}
&:before {
border-left: 1px solid #dcdcdc;
content: '';
left: 5px;
position: absolute;
top: 22px;
height: 15px;
}
&.is-open:before {
display: none;
}
}
&:last-child > .collapsible:before {
display: none;
}
> .is-open .open-close-indicator {
i.fa-angle-down {
margin-top: 0;
}
i.fa-angle-up {
order: 1;
margin-top: -4px;
}
}
.open-close-indicator {
display: flex;
flex-direction: column;
i {
margin-right: 5px;
&.fa-angle-down {
margin-top: -4px;
}
}
}
}
.attempt-state-active {
.attempt-state {
@include runnable-state-active;
}
}
.attempt-state-processing {
.attempt-state {
@include runnable-state-processing;
}
}
.attempt-state-failed {
.attempt-state {
@include runnable-state-failed;
}
.attempt-name:after {
color: $fail;
}
}
.attempt-state-passed {
.attempt-state {
@include runnable-state-passed;
}
.attempt-name:after {
color: $pass;
}
}
.attempt-name {
display: flex;
justify-content: flex-end;
position: relative;
width: 100%;
&:before {
border-top: 1px solid #dcdcdc;
content: '';
left: 15px;
position: absolute;
right: 0;
top: 13px;
}
&:after {
color: #a2a2a2;
content: '';
left: 3px;
position: absolute;
top: 6px;
}
.attempt-tag {
align-items: center;
border: 1px solid #d5d5d5;
border-radius: 7px;
box-shadow: 0 1px 1px 0 rgba(0, 0, 0, 0.20);
display: flex;
font-size: 11px;
padding: 2px 5px;
position: relative;
background-color: #fff;
user-select: none;
cursor: pointer;
&:hover {
background-color: #e8e8e8;
}
}
.collapsible-indicator,
.collapsible-more {
display: none;
}
.attempt-state {
margin-left: 3px;
}
}
}


@@ -0,0 +1,102 @@
import cs from 'classnames'
import _ from 'lodash'
import { observer } from 'mobx-react'
import React, { Component } from 'react'
import Agents from '../agents/agents'
import Collapsible from '../collapsible/collapsible'
import Hooks from '../hooks/hooks'
import Routes from '../routes/routes'
import TestError from '../errors/test-error'
import TestModel from '../test/test-model'
import AttemptModel from './attempt-model'
const NoCommands = () => (
<ul className='hooks-container'>
<li className='no-commands'>
No commands were issued in this test.
</li>
</ul>
)
const AttemptHeader = ({ index }:{index: number}) => (
<span className='attempt-tag'>
<span className='open-close-indicator'>
<i className='fa fa-fw fa-angle-up' />
<i className='fa fa-fw fa-angle-down' />
</span>
Attempt {index + 1}
<i className="attempt-state fa fa-fw" />
</span>
)
function renderAttemptContent (model: AttemptModel) {
// performance optimization - don't render contents if not open
return (
<div className={`attempt-${model.id + 1}`}>
<Agents model={model} />
<Routes model={model} />
<div ref='commands' className='runnable-commands-region'>
{model.hasCommands ? <Hooks model={model} /> : <NoCommands />}
</div>
<div className='attempt-error-region'>
<TestError model={model} isTestError={model.isLast} />
</div>
</div>
)
}
@observer
class Attempt extends Component<{model: AttemptModel, scrollIntoView: Function}> {
componentDidUpdate () {
this.props.scrollIntoView()
}
render () {
const { model } = this.props
// HACK: causes component update when command log is added
model.commands.length
return (
<li
key={model.id}
className={cs('attempt-item', `attempt-state-${model.state}`, {
'attempt-failed': model.state === 'failed',
})}
ref="container"
>
<Collapsible
header={<AttemptHeader index={model.id}/>}
headerClass='attempt-name'
isOpen={model.isOpen}
>
{renderAttemptContent(model)}
</Collapsible>
</li>
)
}
}
const Attempts = observer(({ test, scrollIntoView }: {test: TestModel, scrollIntoView: Function}) => {
return (<ul className={cs('attempts', {
'has-multiple-attempts': test.hasMultipleAttempts,
})}>
{_.map(test.attempts, (attempt) => {
return (
<Attempt
key={attempt.id}
scrollIntoView={scrollIntoView}
model={attempt}
/>
)
})}
</ul>)
})
export { Attempt, AttemptHeader, NoCommands }
export default Attempts
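
The length read flagged as a HACK above leans on mobx's dereference-to-subscribe rule. A minimal illustration (illustrative names, not the reporter's code):

import { observable } from 'mobx'
import { observer } from 'mobx-react'
import React from 'react'

const state = observable({ items: [] as string[] })

// an observer re-renders only when an observable it dereferenced during
// its last render changes, so reading `length` inside render is what
// creates the subscription - the same reason render() above touches
// model.commands.length
const Count = observer(() => <span>{state.items.length}</span>)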


@@ -1,6 +1,5 @@
import React from 'react'
import { shallow } from 'enzyme'
import Collapsible from './collapsible'
describe('<Collapsible />', () => {


@@ -11,7 +11,6 @@ interface Props {
headerExtras?: ReactNode
containerRef?: RefObject<HTMLDivElement>
contentClass?: string
toggleOpen?: (isOpen: boolean) => any
}
interface State {


@@ -110,9 +110,6 @@ export default class Command extends Instrument {
if (this._becameNonPending()) {
clearTimeout(this._pendingTimeout as TimeoutID)
action('became:inactive', () => {
return this.isLongRunning = false
})()
}
this._prevState = this.state


@@ -222,6 +222,7 @@
.command-state-pending .command-number {
i {
line-height: 18px;
display: inline-block;
}


@@ -10,7 +10,7 @@ import ErrorStack from '../errors/error-stack'
import events from '../lib/events'
import FlashOnClick from '../lib/flash-on-click'
import { onEnterOrSpace } from '../lib/util'
import TestModel from '../test/test-model'
import Attempt from '../attempts/attempt-model'
interface DocsUrlProps {
url: string | string[]
@@ -31,7 +31,8 @@ const DocsUrl = ({ url }: DocsUrlProps) => {
}
interface TestErrorProps {
model: TestModel
model: Attempt
isTestError?: boolean
}
const TestError = observer((props: TestErrorProps) => {
@@ -40,7 +41,7 @@ const TestError = observer((props: TestErrorProps) => {
md.enable(['backticks', 'emphasis', 'escape'])
const onPrint = () => {
events.emit('show:error', props.model.id)
events.emit('show:error', props.model)
}
const _onPrintClick = (e: MouseEvent) => {


@@ -65,6 +65,7 @@ class StatsStore {
this._currentTime = Date.now()
}
@action
incrementCount (type: TestState) {
const countKey = `num${_.capitalize(type)}`


@@ -16,10 +16,11 @@ export interface InstrumentProps {
name?: string
message?: string
type?: string
testCurrentRetry: number
state?: string | null
referencesAlias?: Alias
instrument?: 'agent' | 'command' | 'route'
testId: number
testId: string
}
export default class Log {


@@ -4,10 +4,15 @@ body,div,dl,dt,dd,ul,ol,li,h1,h2,h3,h4,h5,h6,pre,code,form,fieldset,legend,input
.reporter {
background-color: #F6F6F6;
bottom: 0;
color: #555;
display: flex;
flex-direction: column;
font-size: 12px;
left: 0;
position: absolute;
right: 0;
top: 0;
* {
box-sizing: border-box;


@@ -217,11 +217,16 @@ describe('events', () => {
expect(runnablesStore.runnableFinished).to.have.been.calledWith('the runnable')
})
it('increments the stats count on test:after:run', () => {
runner.on.withArgs('test:after:run').callArgWith(1, { state: 'passed' })
it('increments the stats count on test:after:run if final: true', () => {
runner.on.withArgs('test:after:run').callArgWith(1, { state: 'passed', final: true })
expect(statsStore.incrementCount).to.have.been.calledWith('passed')
})
it('does not increment the stats count on test:after:run if not final: true', () => {
runner.on.withArgs('test:after:run').callArgWith(1, { state: 'passed' })
expect(statsStore.incrementCount).not.to.have.been.called
})
it('pauses the appState with next command name on paused', () => {
runner.on.withArgs('paused').callArgWith(1, 'next command')
expect(appState.pause).to.have.been.calledWith('next command')
@@ -303,12 +308,12 @@ describe('events', () => {
})
it('emits runner:console:error with test id on show:error', () => {
const err = { isCommandErr: false }
const test = { err: { isCommandErr: false } }
runnablesStore.testById.returns({ err })
events.emit('show:error', 'test id')
runnablesStore.testById.returns(test)
events.emit('show:error', test)
expect(runner.emit).to.have.been.calledWith('runner:console:error', {
err,
err: test.err,
commandId: undefined,
})
})
@@ -319,7 +324,7 @@ describe('events', () => {
} }
runnablesStore.testById.returns(test)
events.emit('show:error', 'test id')
events.emit('show:error', test)
expect(runner.emit).to.have.been.calledWith('runner:console:error', {
err: test.err,
commandId: 'matching command id',
@@ -332,7 +337,7 @@ describe('events', () => {
} }
runnablesStore.testById.returns(test)
events.emit('show:error', 'test id')
events.emit('show:error', test)
expect(runner.emit).to.have.been.calledWith('runner:console:error', {
err: test.err,
commandId: undefined,


@@ -4,7 +4,7 @@ import appState, { AppState } from './app-state'
import runnablesStore, { RunnablesStore, RootRunnable, LogProps } from '../runnables/runnables-store'
import statsStore, { StatsStore, StatsStoreStartInfo } from '../header/stats-store'
import scroller, { Scroller } from './scroller'
import TestModel, { TestProps, UpdateTestCallback } from '../test/test-model'
import TestModel, { UpdatableTestProps, UpdateTestCallback, TestProps } from '../test/test-model'
const localBus = new EventEmitter()
@@ -93,17 +93,19 @@ const events: Events = {
}
}))
runner.on('test:before:run:async', action('test:before:run:async', (runnable: TestModel) => {
runner.on('test:before:run:async', action('test:before:run:async', (runnable: TestProps) => {
runnablesStore.runnableStarted(runnable)
}))
runner.on('test:after:run', action('test:after:run', (runnable: TestModel) => {
runner.on('test:after:run', action('test:after:run', (runnable: TestProps) => {
runnablesStore.runnableFinished(runnable)
statsStore.incrementCount(runnable.state)
if (runnable.final) {
statsStore.incrementCount(runnable.state!)
}
}))
runner.on('test:set:state', action('test:set:state', (runnable: TestProps, cb: UpdateTestCallback) => {
runnablesStore.updateTest(runnable, cb)
runner.on('test:set:state', action('test:set:state', (props: UpdatableTestProps, cb: UpdateTestCallback) => {
runnablesStore.updateTest(props, cb)
}))
runner.on('paused', action('paused', (nextCommandName: string) => {
@@ -160,13 +162,12 @@ const events: Events = {
runner.emit('runner:console:log', commandId)
})
localBus.on('show:error', (testId: number) => {
const test = runnablesStore.testById(testId)
const command = test.err.isCommandErr && test.commandMatchingErr()
localBus.on('show:error', (test: TestModel) => {
const command = test.err.isCommandErr ? test.commandMatchingErr() : null
runner.emit('runner:console:error', {
err: test.err,
commandId: command ? command.id : undefined,
commandId: command?.id,
})
})
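
The final flag introduced above is what keeps retried attempts from inflating the stats. A stripped-down sketch of the gating, with stub types rather than the real stores:

type AfterRunProps = { state?: string | null, final?: boolean }

const onTestAfterRun = (props: AfterRunProps, incrementCount: (state: string) => void) => {
  // every attempt fires test:after:run, but only the final one counts
  if (props.final) {
    incrementCount(props.state!)
  }
}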


@@ -0,0 +1,32 @@
@mixin runnable-state-active {
@extend .#{$fa-css-prefix}-sync-alt;
@extend .#{$fa-css-prefix}-spin;
}
@mixin runnable-state-processing {
@extend .far;
@extend .#{$fa-css-prefix}-square;
color: #888;
line-height: 18px; // @extend .far overrides line-height, so we need to set it again
}
@mixin runnable-state-skipped {
@extend .#{$fa-css-prefix}-ban;
color: #888;
}
@mixin runnable-state-failed {
@extend .#{$fa-css-prefix}-times;
color: $fail;
}
@mixin runnable-state-passed {
@extend .#{$fa-css-prefix}-check;
color: $pass;
}
@mixin runnable-state-pending {
@extend .#{$fa-css-prefix}-circle-notch;
color: lighten($pending, 20%);
}


@@ -5,6 +5,7 @@ $pinned: #9442ca;
$yellow-dark: #FFB61C;
$yellow-medium: lighten($yellow-dark, 25%);
$yellow-lightest: #ffffee;
$retried: #f0ec98;
$link-text: #3380FF;


@@ -1,6 +1,7 @@
// this file is imported by the runner's main.scss
// if you update this file, also update main.scss
@import 'lib/variables';
@import 'lib/mixins';
@import 'lib/base';
@import 'lib/tooltip';
@import '../../../node_modules/@reach/dialog/styles.css';


@@ -1,6 +1,7 @@
// this file is used when developing the reporter in isolation via cypress tests
// if you update this file, also update main-runner.scss
@import 'lib/variables';
@import 'lib/mixins';
@import 'lib/fonts';
@import 'lib/base';
@import 'lib/tooltip';


@@ -104,7 +104,7 @@ describe('<Runnable />', () => {
})
it('renders a runnable for each child', () => {
const component = shallow(<Suite model={model({ children: [{ id: 1 }, { id: 2 }] } as SuiteModel)} />)
const component = shallow(<Suite model={model({ children: [{ id: 1 }, { id: 2 }] } as unknown as SuiteModel)} />)
expect(component.find(Runnable).length).to.equal(2)
})


@@ -48,6 +48,7 @@ class Runnable extends Component<RunnableProps> {
return (
<li
className={cs(`${model.type} runnable runnable-${model.state}`, {
'runnable-retried': model.hasRetried,
hover: this.isHovering,
})}
onMouseOver={this._hover(true)}


@@ -2,13 +2,13 @@ import { observable } from 'mobx'
import { HookProps } from '../hooks/hook-model'
export interface RunnableProps {
id: number
id: string
title?: string
hooks: Array<HookProps>
}
export default class Runnable {
@observable id: number
@observable id: string
@observable shouldRender: boolean = false
@observable title?: string
@observable level: number


@@ -33,30 +33,28 @@ const scrollerStub = () => {
const createHook = (hookId: string) => {
return { hookId, hookName: 'before each' } as HookProps
}
const createTest = (id: number) => {
return { id, title: `test ${id}`, hooks: [], state: 'processing' } as TestProps
const createTest = (id: string) => {
return { id, title: `test ${id}`, hooks: [], state: 'processing', currentRetry: 0 } as TestProps
}
const createSuite = (id: number, tests: Array<TestProps>, suites: Array<SuiteProps>) => {
const createSuite = (id: string, tests: Array<TestProps>, suites: Array<SuiteProps>) => {
return { id, title: `suite ${id}`, tests, suites, hooks: [] } as SuiteProps
}
const createAgent = (id: number, testId: number) => {
return { id, testId, instrument: 'agent' } as AgentProps
const createAgent = (id: number, testId: string) => {
return { id, testId, instrument: 'agent', callCount: 0, testCurrentRetry: 0, functionName: 'foo' } as AgentProps
}
const createCommand = (id: number, testId: number, hookId?: string) => {
const createCommand = (id: number, testId: string, hookId?: string) => {
return { id, testId, instrument: 'command', hookId } as CommandProps
}
const createRoute = (id: number, testId: number) => {
const createRoute = (id: number, testId: string) => {
return { id, testId, instrument: 'route' } as RouteProps
}
const createRootRunnable = () => {
return {
tests: [createTest(1)],
tests: [createTest('1')],
suites: [
createSuite(1, [createTest(2), createTest(3)], [
createSuite(3, [createTest(4)], []), createSuite(4, [createTest(5)], []),
]),
createSuite(2, [createTest(6)], []),
createSuite('1', [createTest('2'), createTest('3')], [createSuite('3', [createTest('4')], []), createSuite('4', [createTest('5')], [])]),
createSuite('2', [createTest('6')], []),
],
} as RootRunnable
}
@@ -100,14 +98,14 @@ describe('runnables store', () => {
it('adds logs to tests when specified', () => {
const rootRunnable = createRootRunnable()
rootRunnable.tests![0].agents = [createAgent(1, 1), createAgent(2, 1), createAgent(3, 1)]
rootRunnable.tests![0].commands = [createCommand(1, 1, 'h1')]
rootRunnable.tests![0].routes = [createRoute(1, 1), createRoute(2, 1)]
rootRunnable.tests![0].agents = [createAgent(1, '1'), createAgent(2, '1'), createAgent(3, '1')]
rootRunnable.tests![0].commands = [createCommand(1, '1', 'h1')]
rootRunnable.tests![0].routes = [createRoute(1, '1'), createRoute(2, '1')]
rootRunnable.tests![0].hooks = [createHook('h1')]
instance.setRunnables(rootRunnable)
expect((instance.runnables[0] as TestModel).agents.length).to.equal(3)
expect((instance.runnables[0] as TestModel).commands.length).to.equal(1)
expect((instance.runnables[0] as TestModel).routes.length).to.equal(2)
expect((instance.runnables[0] as TestModel).lastAttempt.agents.length).to.equal(3)
expect((instance.runnables[0] as TestModel).lastAttempt.commands.length).to.equal(1)
expect((instance.runnables[0] as TestModel).lastAttempt.routes.length).to.equal(2)
})
it('sets the appropriate nesting levels', () => {
@@ -142,17 +140,17 @@ describe('runnables store', () => {
})
it('sets .hasTests flag to false if there are no tests', () => {
instance.setRunnables({ tests: [], suites: [createSuite(1, [], []), createSuite(2, [], [])] })
instance.setRunnables({ tests: [], suites: [createSuite('1', [], []), createSuite('2', [], [])] })
expect(instance.hasTests).to.be.false
})
it('sets .hasSingleTest flag to true if there is only one test', () => {
instance.setRunnables({ tests: [], suites: [createSuite(1, [], []), createSuite(2, [createTest(1)], [])] })
instance.setRunnables({ tests: [], suites: [createSuite('1', [], []), createSuite('2', [createTest('1')], [])] })
expect(instance.hasSingleTest).to.be.true
})
it('sets .hasSingleTest flag to false if there are no tests', () => {
instance.setRunnables({ tests: [], suites: [createSuite(1, [], []), createSuite(2, [], [])] })
instance.setRunnables({ tests: [], suites: [createSuite('1', [], []), createSuite('2', [], [])] })
expect(instance.hasSingleTest).to.be.false
})
@@ -162,7 +160,7 @@ describe('runnables store', () => {
})
it('starts rendering the runnables on requestAnimationFrame', () => {
instance.setRunnables({ tests: [], suites: [createSuite(1, [], []), createSuite(2, [createTest(1)], [])] })
instance.setRunnables({ tests: [], suites: [createSuite('1', [], []), createSuite('2', [createTest('1')], [])] })
expect(instance.runnables[0].shouldRender).to.be.true
expect(instance.runnables[1].shouldRender).to.be.true
expect((instance.runnables[1] as SuiteModel).children[0].shouldRender).to.be.true
@@ -202,44 +200,44 @@ describe('runnables store', () => {
context('#runnableStarted', () => {
it('starts the test with the given id', () => {
instance.setRunnables({ tests: [createTest(1)], suites: [] })
instance.runnableStarted({ id: 1 } as TestModel)
instance.setRunnables({ tests: [createTest('1')], suites: [] })
instance.runnableStarted({ id: '1' } as TestProps)
expect((instance.runnables[0] as TestModel).isActive).to.be.true
})
})
context('#runnableFinished', () => {
it('finishes the test with the given id', () => {
instance.setRunnables({ tests: [createTest(1)], suites: [] })
instance.runnableStarted({ id: 1 } as TestModel)
instance.runnableFinished({ id: 1 } as TestModel)
instance.setRunnables({ tests: [createTest('1')], suites: [] })
instance.runnableStarted({ id: '1' } as TestProps)
instance.runnableFinished({ id: '1' } as TestProps)
expect((instance.runnables[0] as TestModel).isActive).to.be.false
})
})
context('#testById', () => {
it('returns the test with the given id', () => {
instance.setRunnables({ tests: [createTest(1), createTest(3)], suites: [] })
expect(instance.testById(3).title).to.be.equal('test 3')
instance.setRunnables({ tests: [createTest('1'), createTest('3')], suites: [] })
expect(instance.testById('3').title).to.be.equal('test 3')
})
})
context('#updateLog', () => {
it('updates the log', () => {
const test = createTest(1)
const test = createTest('1')
test.hooks = [createHook('h1')]
instance.setRunnables({ tests: [test] })
instance.addLog(createCommand(1, 1, 'h1'))
instance.updateLog({ id: 1, name: 'new name' } as LogProps)
expect(instance.testById(1).commands[0].name).to.equal('new name')
instance.addLog(createCommand(1, '1', 'h1'))
instance.updateLog({ id: 1, testId: '1', name: 'new name' } as LogProps)
expect(instance.testById('1').lastAttempt.commands[0].name).to.equal('new name')
})
})
context('#reset', () => {
it('resets flags to default values', () => {
instance.setRunnables({ tests: [createTest(1)] })
instance.setRunnables({ tests: [createTest('1')] })
instance.attemptingShowSnapshot = true
instance.showingSnapshot = true
instance.reset()
@@ -252,15 +250,15 @@ describe('runnables store', () => {
})
it('resets runnables', () => {
instance.setRunnables({ tests: [createTest(1)] })
instance.setRunnables({ tests: [createTest('1')] })
instance.reset()
expect(instance.runnables.length).to.equal(0)
})
it('resets tests', () => {
instance.setRunnables({ tests: [createTest(1)] })
instance.setRunnables({ tests: [createTest('1')] })
instance.reset()
expect(instance.testById(1)).to.be.undefined
expect(instance.testById('1')).to.be.undefined
})
})
})


@@ -8,7 +8,7 @@ import RouteModel, { RouteProps } from '../routes/route-model'
import scroller, { Scroller } from '../lib/scroller'
import { HookProps } from '../hooks/hook-model'
import SuiteModel, { SuiteProps } from './suite-model'
import TestModel, { TestProps, UpdateTestCallback } from '../test/test-model'
import TestModel, { TestProps, UpdateTestCallback, UpdatableTestProps } from '../test/test-model'
import RunnableModel from './runnable-model'
const defaults = {
@@ -29,7 +29,7 @@ export type LogProps = AgentProps | CommandProps | RouteProps
export type RunnableArray = Array<TestModel | SuiteModel>
type Log = AgentModel | CommandModel | RouteModel
export type Log = AgentModel | CommandModel | RouteModel
export interface RootRunnable {
hooks?: Array<HookProps>
@@ -102,15 +102,11 @@ class RunnablesStore {
}
_createTest (props: TestProps, level: number) {
const test = new TestModel(props, level)
const test = new TestModel(props, level, this)
this._runnablesQueue.push(test)
this._tests[test.id] = test
_.each(props.agents, this.addLog.bind(this))
_.each(props.commands, this.addLog.bind(this))
_.each(props.routes, this.addLog.bind(this))
return test
}
@@ -151,66 +147,35 @@ class RunnablesStore {
this._initialScrollTop = initialScrollTop
}
updateTest (props: TestProps, cb: UpdateTestCallback) {
updateTest (props: UpdatableTestProps, cb: UpdateTestCallback) {
this._withTest(props.id, (test) => {
return test.update(props, cb)
test.update(props, cb)
})
}
runnableStarted ({ id }: TestModel) {
this._withTest(id, (test) => {
return test.start()
})
}
runnableFinished (props: TestModel) {
runnableStarted (props: TestProps) {
this._withTest(props.id, (test) => {
return test.finish(props)
test.start(props)
})
}
testById (id: number) {
runnableFinished (props: TestProps) {
this._withTest(props.id, (test) => {
test.finish(props)
})
}
testById (id: string) {
return this._tests[id]
}
addLog (log: LogProps) {
switch (log.instrument) {
case 'command': {
const command = new CommandModel(log as CommandProps)
this._logs[log.id] = command
this._withTest(log.testId, (test) => {
return test.addCommand(command)
})
break
}
case 'agent': {
const agent = new AgentModel(log as AgentProps)
this._logs[log.id] = agent
this._withTest(log.testId, (test) => {
return test.addAgent(agent)
})
break
}
case 'route': {
const route = new RouteModel(log as RouteProps)
this._logs[log.id] = route
this._withTest(log.testId, (test) => {
return test.addRoute(route)
})
break
}
default:
throw new Error(`Attempted to add log for unknown instrument: ${log.instrument}`)
}
this._withTest(log.testId, (test) => {
test.addLog(log)
})
}
_withTest (id: number, cb: ((test: TestModel) => void)) {
_withTest (id: string, cb: ((test: TestModel) => void)) {
// we get events for suites and tests, but only tests change during a run,
// so if the id isn't found in this._tests, we ignore it b/c it's a suite
const test = this._tests[id]
@@ -218,13 +183,10 @@ class RunnablesStore {
if (test) cb(test)
}
updateLog (log: LogProps) {
const found = this._logs[log.id]
if (found) {
// The type of found is Log (one of Agent, Command, Route). So, we need any here.
found.update(log as any)
}
updateLog (props: LogProps) {
this._withTest(props.testId, (test) => {
test.updateLog(props)
})
}
reset () {
@@ -234,7 +196,6 @@ class RunnablesStore {
this.runnables = []
this._tests = {}
this._logs = {}
this._runnablesQueue = []
}
}
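
The _withTest guard above is load-bearing: runner events carry suite ids as well as test ids, but only tests live in the map. The same lookup-or-ignore pattern, reduced to a generic sketch:

const withEntry = <T>(entries: Record<string, T>, id: string, cb: (entry: T) => void) => {
  // suites emit events too but are never tracked, so unknown ids are
  // silently skipped rather than throwing
  const entry = entries[id]

  if (entry) cb(entry)
}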


@@ -57,7 +57,7 @@
}
}
&.test.hover {
.attempt-item:hover {
> .runnable-wrapper .runnable-controls i.fa-redo {
visibility: visible !important;
}
@@ -69,12 +69,11 @@
&.runnable-active {
.runnable-state {
@extend .#{$fa-css-prefix}-sync-alt;
@extend .#{$fa-css-prefix}-spin;
@include runnable-state-active;
}
}
.runnable-state {
.runnable-state, .attempt-state {
display: inline-block;
line-height: 18px;
margin-right: 5px;
@@ -90,12 +89,10 @@
color: #bbbcbd;
}
&.test.runnable-processing {
&.test.runnable-processing {
.runnable-state {
@extend .far;
line-height: 18px; // @extend .far overrides line-height, so we need to set it again
@extend .#{$fa-css-prefix}-square;
color: #888;
@include runnable-state-processing;
}
}
@@ -119,10 +116,14 @@
border-left: 5px solid $pass;
}
.runnable-retried > div > .runnable-wrapper,
.runnable-retried > div > .runnable-instruments {
border-left: 5px solid $retried;
}
&.runnable-skipped > .runnable-wrapper {
.runnable-state {
@extend .#{$fa-css-prefix}-ban;
color: #888;
@include runnable-state-skipped;
}
.runnable-title {
@@ -137,8 +138,7 @@
&.test.runnable-failed {
.runnable-state {
@extend .#{$fa-css-prefix}-times;
color: $fail;
@include runnable-state-failed;
}
}
@@ -152,21 +152,18 @@
&.test.runnable-passed {
.runnable-state {
@extend .#{$fa-css-prefix}-check;
color: $pass;
@include runnable-state-passed;
}
}
&.test.runnable-pending {
.runnable-state {
@include runnable-state-pending;
}
.runnable-title {
color: lighten($pending, 25%);
}
.runnable-state {
@extend .#{$fa-css-prefix}-circle-notch;
color: lighten($pending, 20%);
}
.runnable-commands-region {
display: none;
}


@@ -46,7 +46,7 @@ describe('<Runnables />', () => {
it('renders <RunnablesList /> when there are runnables', () => {
const component = shallow(
<Runnables
runnablesStore={runnablesStoreStub({ runnables: [{ id: 1 }] as TestModel[] })}
runnablesStore={runnablesStoreStub({ runnables: [{ id: '1' }] as TestModel[] })}
scroller={scrollerStub()}
spec={specStub}
/>,
@@ -134,7 +134,7 @@ describe('<Runnables />', () => {
context('<RunnablesList />', () => {
it('renders a runnable for each runnable in model', () => {
const component = shallow(<RunnablesList runnables={[{ id: 1 } as TestModel, { id: 2 } as TestModel]} />)
const component = shallow(<RunnablesList runnables={[{ id: '1' } as TestModel, { id: '2' } as TestModel]} />)
expect(component.find('Runnable').length).to.equal(2)
})


@@ -2,7 +2,7 @@ import Suite from './suite-model'
import TestModel from '../test/test-model'
const suiteWithChildren = (children: Array<Partial<TestModel>>) => {
const suite = new Suite({ id: 1, title: '', hooks: [] }, 0)
const suite = new Suite({ id: '1', title: '', hooks: [] }, 0)
suite.children = children as Array<TestModel>


@@ -32,6 +32,10 @@ export default class Suite extends Runnable {
return _.map(this.children, 'state')
}
@computed get hasRetried (): boolean {
return _.some(this.children, (v) => v.hasRetried)
}
@computed get _anyChildrenFailed () {
return _.some(this._childStates, (state) => {
return state === 'failed'


@@ -1,37 +1,56 @@
import { HookProps } from '../hooks/hook-model'
import Command, { CommandProps } from '../commands/command-model'
import Agent from '../agents/agent-model'
import Route from '../routes/route-model'
import Err from '../errors/err-model'
import _ from 'lodash'
import TestModel, { TestProps, UpdatableTestProps } from './test-model'
import CommandModel, { CommandProps } from '../commands/command-model'
import { RouteProps } from '../routes/route-model'
import { RunnablesStore } from '../runnables/runnables-store'
import { AgentProps } from '../agents/agent-model'
import TestModel, { TestProps } from './test-model'
const createTest = (props: Partial<TestProps> = {}, store = {}) => {
const defaults = {
currentRetry: 0,
id: 'r3',
prevAttempts: [],
state: null,
hooks: [],
} as TestProps
const commandHook: (hookId: string) => Partial<Command> = (hookId: string) => {
return {
hookId,
isMatchingEvent: () => {
return false
},
}
return new TestModel(_.defaults(props, defaults), 0, store as RunnablesStore)
}
const createCommand = (props: Partial<CommandProps> = {}) => {
const defaults = {
instrument: 'command',
hookName: '',
id: 1,
hookId: 'r3',
numElements: 1,
testCurrentRetry: 0,
testId: 'r3',
timeout: 4000,
wallClockStartedAt: new Date().toString(),
} as CommandProps
return _.defaults(props, defaults)
}
describe('Test model', () => {
context('.state', () => {
it('is the "state" when it exists', () => {
const test = new TestModel({ id: 1, state: 'passed' } as TestProps, 0)
const test = createTest({ state: 'passed' })
expect(test.state).to.equal('passed')
})
it('is active when there is no state and isActive is true', () => {
const test = new TestModel({ id: 1 } as TestProps, 0)
const test = createTest()
test.isActive = true
test.lastAttempt.isActive = true
expect(test.state).to.equal('active')
})
it('is processing when there is no state and isActive is falsey', () => {
const test = new TestModel({ id: 1 } as TestProps, 0)
const test = createTest()
expect(test.state).to.equal('processing')
})
@@ -39,201 +58,262 @@ describe('Test model', () => {
context('.isLongRunning', () => {
it('starts out not long running', () => {
const test = new TestModel({ id: 1 } as TestProps, 0)
const test = createTest()
expect(test.isLongRunning).to.be.false
})
it('is not long running if active but without a long running command', () => {
const test = new TestModel({ id: 1 } as TestProps, 0)
const test = createTest()
test.start()
test.start({} as TestProps)
expect(test.isLongRunning).to.be.false
})
it('becomes long running if active and has a long running command', () => {
const test = new TestModel({ id: 1, hooks: [{ hookId: 'h1' } as HookProps] } as TestProps, 0)
const test = createTest()
test.start()
test.addCommand({ isLongRunning: true, hookId: 'h1' } as Command)
test.start({} as TestProps)
const command = test.addLog(createCommand()) as CommandModel
command.isLongRunning = true
expect(test.isLongRunning).to.be.true
})
it('becomes not long running if it becomes inactive', () => {
const test = new TestModel({ id: 1, hooks: [{ hookId: 'h1' } as HookProps] } as TestProps, 0)
const test = createTest()
test.start()
test.addCommand({ isLongRunning: true, hookId: 'h1' } as Command)
test.finish({})
test.start({} as TestProps)
const command = test.addLog(createCommand()) as CommandModel
command.isLongRunning = true
test.finish({} as UpdatableTestProps)
expect(test.isLongRunning).to.be.false
})
})
context('#addAgent', () => {
it('adds the agent to the agents collection', () => {
const test = new TestModel({ id: 1 } as TestProps, 0)
const test = createTest()
test.addAgent({} as Agent)
expect(test.agents.length).to.equal(1)
test.addLog({ instrument: 'agent' } as AgentProps)
expect(test.lastAttempt.agents.length).to.equal(1)
})
})
context('#addRoute', () => {
it('adds the route to the routes collection', () => {
const test = new TestModel({ id: 1 } as TestProps, 0)
const test = createTest()
test.addRoute({} as Route)
expect(test.routes.length).to.equal(1)
test.addLog({ instrument: 'route' } as RouteProps)
expect(test.lastAttempt.routes.length).to.equal(1)
})
})
context('#addCommand', () => {
it('adds the command to the commands collection', () => {
const test = new TestModel({ id: 1, hooks: [{ hookId: 'h1' } as HookProps] } as TestProps, 0)
const test = createTest()
test.addCommand({ hookId: 'h1' } as Command)
expect(test.commands.length).to.equal(1)
test.addLog(createCommand())
expect(test.lastAttempt.commands.length).to.equal(1)
})
it('creates a hook and adds the command to it if it does not exist', () => {
const test = createTest({ hooks: [
{ hookName: 'before each', hookId: 'h1' },
] })
test.addLog(createCommand({ instrument: 'command', hookId: 'h1' }))
expect(test.lastAttempt.hooks.length).to.equal(2)
expect(test.lastAttempt.hooks[0].hookName).equal('before each')
expect(test.lastAttempt.hooks[0].commands.length).to.equal(1)
})
it('adds the command to an existing hook if it already exists', () => {
const test = createTest({ hooks: [{ hookId: 'h1', hookName: 'before each' }] })
const commandProps = createCommand({
hookId: 'h1',
})
const command = test.addLog(commandProps) as CommandModel
command.isMatchingEvent = () => false
expect(test.lastAttempt.hooks.length).to.equal(2)
expect(test.lastAttempt.hooks[0].hookName).to.equal('before each')
expect(test.lastAttempt.hooks[0].commands.length).to.equal(1)
test.addLog(createCommand({ hookId: 'h1' }))
expect(test.lastAttempt.hooks.length).to.equal(2)
expect(test.lastAttempt.hooks[0].commands.length).to.equal(2)
})
it('adds the command to the correct hook', () => {
const test = new TestModel({
id: 1,
const test = createTest({
hooks: [
{ hookId: 'h1' } as HookProps,
{ hookId: 'h2' } as HookProps,
{ hookId: 'h1', hookName: 'before each' },
{ hookId: 'h2', hookName: 'before each' },
],
} as TestProps, 0)
})
test.addCommand(commandHook('h1') as Command)
expect(test.hooks[0].commands.length).to.equal(1)
expect(test.hooks[1].commands.length).to.equal(0)
expect(test.hooks[2].commands.length).to.equal(0)
test.addLog(createCommand({ hookId: 'h1' }))
expect(test.lastAttempt.hooks[0].commands.length).to.equal(1)
expect(test.lastAttempt.hooks[1].commands.length).to.equal(0)
expect(test.lastAttempt.hooks[2].commands.length).to.equal(0)
test.addCommand(commandHook('1') as Command)
expect(test.hooks[0].commands.length).to.equal(1)
expect(test.hooks[1].commands.length).to.equal(1)
expect(test.hooks[2].commands.length).to.equal(0)
test.addLog(createCommand({ hookId: 'h2' }))
expect(test.lastAttempt.hooks[0].commands.length).to.equal(1)
expect(test.lastAttempt.hooks[1].commands.length).to.equal(1)
expect(test.lastAttempt.hooks[2].commands.length).to.equal(0)
})
it('moves hooks into the correct order', () => {
const test = new TestModel({
id: 1,
const test = createTest({
hooks: [
{ hookId: 'h1' } as HookProps,
{ hookId: 'h2' } as HookProps,
{ hookId: 'h1', hookName: 'before all' },
{ hookId: 'h2', hookName: 'before each' },
],
} as TestProps, 0)
})
test.addCommand(commandHook('h2') as Command)
expect(test.hooks[0].hookId).to.equal('h2')
expect(test.hooks[0].invocationOrder).to.equal(0)
expect(test.hooks[0].commands.length).to.equal(1)
test.addLog(createCommand({ hookId: 'h2' }))
expect(test.lastAttempt.hooks[0].hookId).to.equal('h2')
expect(test.lastAttempt.hooks[0].invocationOrder).to.equal(0)
expect(test.lastAttempt.hooks[0].commands.length).to.equal(1)
test.addCommand(commandHook('h1') as Command)
expect(test.hooks[1].hookId).to.equal('h1')
expect(test.hooks[1].invocationOrder).to.equal(1)
expect(test.hooks[1].commands.length).to.equal(1)
test.addLog(createCommand({ hookId: 'h1' }))
expect(test.lastAttempt.hooks[1].hookId).to.equal('h1')
expect(test.lastAttempt.hooks[1].invocationOrder).to.equal(1)
expect(test.lastAttempt.hooks[1].commands.length).to.equal(1)
})
it('counts and assigns the number of each hook type', () => {
const test = new TestModel({
id: 1,
const test = createTest({
hooks: [
{ hookId: 'h1', hookName: 'before each' } as HookProps,
{ hookId: 'h2', hookName: 'after each' } as HookProps,
{ hookId: 'h3', hookName: 'before each' } as HookProps,
{ hookId: 'h1', hookName: 'before each' },
{ hookId: 'h2', hookName: 'after each' },
{ hookId: 'h3', hookName: 'before each' },
],
} as TestProps, 0)
})
test.addCommand(commandHook('h1') as Command)
expect(test.hookCount['before each']).to.equal(1)
expect(test.hookCount['after each']).to.equal(0)
expect(test.hooks[0].hookNumber).to.equal(1)
test.addLog(createCommand({ hookId: 'h1' }))
expect(test.lastAttempt.hookCount['before each']).to.equal(1)
expect(test.lastAttempt.hookCount['after each']).to.equal(0)
expect(test.lastAttempt.hooks[0].hookNumber).to.equal(1)
test.addCommand(commandHook('h1') as Command)
expect(test.hookCount['before each']).to.equal(1)
expect(test.hookCount['after each']).to.equal(0)
expect(test.hooks[0].hookNumber).to.equal(1)
test.addLog(createCommand({ hookId: 'h1' }))
expect(test.lastAttempt.hookCount['before each']).to.equal(1)
expect(test.lastAttempt.hookCount['after each']).to.equal(0)
expect(test.lastAttempt.hooks[0].hookNumber).to.equal(1)
test.addCommand(commandHook('h3') as Command)
expect(test.hookCount['before each']).to.equal(2)
expect(test.hookCount['after each']).to.equal(0)
expect(test.hooks[1].hookNumber).to.equal(2)
test.addLog(createCommand({ hookId: 'h3' }))
expect(test.lastAttempt.hookCount['before each']).to.equal(2)
expect(test.lastAttempt.hookCount['after each']).to.equal(0)
expect(test.lastAttempt.hooks[1].hookNumber).to.equal(2)
test.addCommand(commandHook('h2') as Command)
expect(test.hookCount['before each']).to.equal(2)
expect(test.hookCount['after each']).to.equal(1)
expect(test.hooks[2].hookNumber).to.equal(1)
test.addLog(createCommand({ hookId: 'h2' }))
expect(test.lastAttempt.hookCount['before each']).to.equal(2)
expect(test.lastAttempt.hookCount['after each']).to.equal(1)
expect(test.lastAttempt.hooks[2].hookNumber).to.equal(1)
})
})
context('#start', () => {
it('sets the test as active', () => {
const test = new TestModel({ id: 1 } as TestProps, 0)
const test = createTest()
test.start()
test.start({} as TestProps)
expect(test.isActive).to.be.true
})
})
context('#finish', () => {
it('sets the test as inactive', () => {
const test = new TestModel({ id: 1 } as TestProps, 0)
const test = createTest()
test.finish({})
test.finish({} as UpdatableTestProps)
expect(test.isActive).to.be.false
})
it('updates the state of the test', () => {
const test = new TestModel({ id: 1 } as TestProps, 0)
const test = createTest()
test.finish({ state: 'failed' })
test.finish({ state: 'failed' } as UpdatableTestProps)
expect(test.state).to.equal('failed')
})
it('updates the test err', () => {
const test = new TestModel({ id: 1 } as TestProps, 0)
const test = createTest()
test.finish({ err: { name: 'SomeError' } as Err })
test.finish({ err: { name: 'SomeError' } as Err } as UpdatableTestProps)
expect(test.err.name).to.equal('SomeError')
})
it('sets the hook to failed if it exists', () => {
const test = new TestModel({ id: 1, hooks: [{ hookId: 'h1' } as HookProps] } as TestProps, 0)
const test = createTest({ hooks: [{ hookId: 'h1', hookName: 'before each' }] })
test.addCommand({ hookId: 'h1' } as Command)
test.finish({ hookId: 'h1' })
expect(test.hooks[0].failed).to.be.true
test.addLog(createCommand({ instrument: 'command' }))
test.finish({ hookId: 'h1', err: { message: 'foo' } as Err } as UpdatableTestProps)
expect(test.lastAttempt.hooks[1].failed).to.be.true
})
it('does not throw error if hook does not exist', () => {
const test = new TestModel({ id: 1 } as TestProps, 0)
const test = createTest()
expect(() => {
test.finish({ hookId: 'h1' })
test.finish({ hookId: 'h1' } as UpdatableTestProps)
}).not.to.throw()
})
})
context('#commandMatchingErr', () => {
it('returns last command matching the error', () => {
const test = new TestModel({ id: 1, err: { message: 'SomeError' } as Err, hooks: [{ hookId: 'h1' } as HookProps] } as TestProps, 0)
const test = createTest({ err: { message: 'SomeError' } as Err, hooks: [
{ hookId: 'h1', hookName: 'before each' },
{ hookId: 'h2', hookName: 'before each' },
] })
test.addCommand(new Command({ err: { message: 'SomeError' } as Err, hookId: 'h1' } as CommandProps))
test.addCommand(new Command({ err: {} as Err, hookId: 'h1' } as CommandProps))
test.addCommand(new Command({ err: { message: 'SomeError' } as Err, hookId: 'h1' } as CommandProps))
test.addCommand(new Command({ err: {} as Err, hookId: 'h1' } as CommandProps))
test.addCommand(new Command({ name: 'The One', err: { message: 'SomeError' } as Err, hookId: 'h1' } as CommandProps))
test.addLog(createCommand({ err: { message: 'SomeError' } as Err, hookId: 'h1' }))
test.addLog(createCommand({ err: {} as Err, hookId: 'h1' }))
test.addLog(createCommand({ err: { message: 'SomeError' } as Err, hookId: 'h1' }))
test.addLog(createCommand({ err: {} as Err, hookId: 'h2' }))
test.addLog(createCommand({ name: 'The One', err: { message: 'SomeError' } as Err, hookId: 'h2' }))
expect(test.commandMatchingErr()!.name).to.equal('The One')
})
it('returns undefined if there are no commands with errors', () => {
const test = new TestModel({ id: 1, err: { message: 'SomeError' } as Err, hooks: [{ hookId: 'h1' } as HookProps] } as TestProps, 0)
const test = createTest({ err: { message: 'SomeError' } as Err, hooks: [
{ hookId: 'h1', hookName: 'before each' },
{ hookId: 'h2', hookName: 'before each' },
{ hookId: 'h3', hookName: 'before each' },
] })
test.addCommand(new Command({ hookId: 'h1' } as CommandProps))
test.addCommand(new Command({ hookId: 'h1' } as CommandProps))
test.addCommand(new Command({ hookId: 'h1' } as CommandProps))
expect(test.commandMatchingErr()).to.be.undefined
})
})
context('#isOpen', () => {
it('false by default', () => {
const test = createTest()
test.start({} as TestProps)
expect(test.isOpen).eq(false)
})
it('true when the model is long running', () => {
const test = createTest()
test.start({} as TestProps)
const command = test.addLog(createCommand()) as CommandModel
command.isLongRunning = true
expect(test.isOpen).eq(true)
})
it('true when there is only one test', () => {
const test = createTest({}, { hasSingleTest: true })
expect(test.isOpen).eq(true)
})
})
})


@@ -1,195 +1,205 @@
import _ from 'lodash'
import { action, autorun, computed, observable, observe } from 'mobx'
import { action, computed, observable } from 'mobx'
import { FileDetails } from '@packages/ui-components'
import Attempt from '../attempts/attempt-model'
import Err from '../errors/err-model'
import Hook, { HookName } from '../hooks/hook-model'
import { HookProps } from '../hooks/hook-model'
import Runnable, { RunnableProps } from '../runnables/runnable-model'
import Command, { CommandProps } from '../commands/command-model'
import Agent, { AgentProps } from '../agents/agent-model'
import Route, { RouteProps } from '../routes/route-model'
import { CommandProps } from '../commands/command-model'
import { AgentProps } from '../agents/agent-model'
import { RouteProps } from '../routes/route-model'
import { RunnablesStore, LogProps } from '../runnables/runnables-store'
export type TestState = 'active' | 'failed' | 'pending' | 'passed' | 'processing'
export type UpdateTestCallback = () => void
export interface TestProps extends RunnableProps {
state: TestState
state: TestState | null
err?: Err
isOpen?: boolean
agents?: Array<AgentProps>
commands?: Array<CommandProps>
routes?: Array<RouteProps>
hooks: Array<HookProps>
prevAttempts?: Array<TestProps>
currentRetry: number
retries?: number
final?: boolean
invocationDetails?: FileDetails
}
export interface UpdatableTestProps {
id: TestProps['id']
state?: TestProps['state']
err?: TestProps['err']
hookId?: string
isOpen?: TestProps['isOpen']
currentRetry?: TestProps['currentRetry']
retries?: TestProps['retries']
}
export default class Test extends Runnable {
@observable agents: Array<Agent> = []
@observable commands: Array<Command> = []
@observable err = new Err({})
@observable hooks: Array<Hook> = []
// TODO: make this an enum with states: 'QUEUED, ACTIVE, INACTIVE'
@observable isActive: boolean | null = null
@observable isLongRunning = false
@observable isOpen = false
@observable routes: Array<Route> = []
@observable _state?: TestState | null = null
@observable _invocationCount: number = 0
@observable invocationDetails?: FileDetails
@observable hookCount: { [name in HookName]: number } = {
'before all': 0,
'before each': 0,
'after all': 0,
'after each': 0,
'test body': 0,
}
type = 'test'
callbackAfterUpdate: (() => void) | null = null
_callbackAfterUpdate: UpdateTestCallback | null = null
hooks: HookProps[]
invocationDetails?: FileDetails
constructor (props: TestProps, level: number) {
@observable attempts: Attempt[] = []
@observable _isOpen: boolean | null = null
@observable isOpenWhenActive: boolean | null = null
@observable _isFinished = false
constructor (props: TestProps, level: number, private store: RunnablesStore) {
super(props, level)
this._state = props.state
this.err.update(props.err)
this.invocationDetails = props.invocationDetails
this.hooks = _.map(props.hooks, (hook) => new Hook(hook))
this.hooks.push(new Hook({
hookId: this.id.toString(),
this.hooks = [...props.hooks, {
hookId: props.id.toString(),
hookName: 'test body',
invocationDetails: this.invocationDetails,
}))
invocationDetails: props.invocationDetails,
}]
autorun(() => {
// if at any point, a command goes long running, set isLongRunning
// to true until the test becomes inactive
if (!this.isActive) {
action('became:inactive', () => {
return this.isLongRunning = false
})()
} else if (this._hasLongRunningCommand) {
action('became:long:running', () => {
return this.isLongRunning = true
})()
}
_.each(props.prevAttempts || [], (attempt) => this._addAttempt(attempt))
this._addAttempt(props)
}
@computed get isLongRunning () {
return _.some(this.attempts, (attempt: Attempt) => {
return attempt.isLongRunning
})
}
@computed get _hasLongRunningCommand () {
return _.some(this.commands, (command) => {
return command.isLongRunning
})
@computed get isOpen () {
if (this._isOpen === null) {
return Boolean(this.state === 'failed'
|| this.isLongRunning
|| this.isActive && (this.hasMultipleAttempts || this.isOpenWhenActive)
|| this.store.hasSingleTest)
}
return this._isOpen
}
@computed get state () {
return this._state || (this.isActive ? 'active' : 'processing')
return this.lastAttempt ? this.lastAttempt.state : 'active'
}
addAgent (agent: Agent) {
this.agents.push(agent)
@computed get err () {
return this.lastAttempt ? this.lastAttempt.err : new Err({})
}
addRoute (route: Route) {
this.routes.push(route)
@computed get lastAttempt () {
return _.last(this.attempts) as Attempt
}
addCommand (command: Command) {
this.commands.push(command)
const hookIndex = _.findIndex(this.hooks, { hookId: command.hookId })
const hook = this.hooks[hookIndex]
hook.addCommand(command)
// make sure that hooks are in order of invocation
if (hook.invocationOrder === undefined) {
hook.invocationOrder = this._invocationCount++
if (hook.invocationOrder !== hookIndex) {
this.hooks[hookIndex] = this.hooks[hook.invocationOrder]
this.hooks[hook.invocationOrder] = hook
}
}
// assign number if non existent
if (hook.hookNumber === undefined) {
hook.hookNumber = ++this.hookCount[hook.hookName]
}
@computed get hasMultipleAttempts () {
return this.attempts.length > 1
}
start () {
this.isActive = true
@computed get hasRetried () {
return this.state === 'passed' && this.hasMultipleAttempts
}
update ({ state, err, hookId, isOpen }: UpdatableTestProps, cb?: UpdateTestCallback) {
let hadChanges = false
// TODO: make this an enum with states: 'QUEUED, ACTIVE, INACTIVE'
@computed get isActive (): boolean {
return _.some(this.attempts, { isActive: true })
}
const disposer = observe(this, (change) => {
hadChanges = true
@computed get currentRetry () {
return this.attempts.length - 1
}
disposer()
isLastAttempt (attemptModel: Attempt) {
return this.lastAttempt === attemptModel
}
// apply change as-is
return change
addLog = (props: LogProps) => {
return this._withAttempt(props.testCurrentRetry, (attempt: Attempt) => {
return attempt.addLog(props)
})
}
if (cb) {
this.callbackAfterUpdate = () => {
this.callbackAfterUpdate = null
cb()
updateLog (props: LogProps) {
this._withAttempt(props.testCurrentRetry, (attempt: Attempt) => {
attempt.updateLog(props)
})
}
@action start (props: TestProps) {
let attempt = this.getAttemptByIndex(props.currentRetry)
if (!attempt) {
attempt = this._addAttempt(props)
}
attempt.start()
}
@action update (props: UpdatableTestProps, cb: UpdateTestCallback) {
if (props.isOpen != null) {
this.setIsOpenWhenActive(props.isOpen)
if (this.isOpen !== props.isOpen) {
this._callbackAfterUpdate = cb
return
}
}
this._state = state
this.err.update(err)
if (isOpen != null) {
this.isOpen = isOpen
}
cb()
}
if (hookId) {
const hook = _.find(this.hooks, { hookId })
// this is called to sync up the command log UI for the sake of
// screenshots, so we only ever need to open the last attempt
setIsOpenWhenActive (isOpen: boolean) {
this.isOpenWhenActive = isOpen
}
if (hook) {
hook.failed = true
}
}
// if we had no changes then react will
// never fire componentDidUpdate and
// so we need to manually call our callback
// https://github.com/cypress-io/cypress/issues/674#issuecomment-366495057
if (!hadChanges) {
// unbind the listener if no changes
disposer()
// if we had a callback, invoke it
if (this.callbackAfterUpdate) {
this.callbackAfterUpdate()
}
callbackAfterUpdate () {
if (this._callbackAfterUpdate) {
this._callbackAfterUpdate()
this._callbackAfterUpdate = null
}
}
finish (props: UpdatableTestProps) {
this.update(props)
this.isActive = false
@action finish (props: UpdatableTestProps) {
this._isFinished = !(props.retries && props.currentRetry) || props.currentRetry >= props.retries
this._withAttempt(props.currentRetry || 0, (attempt: Attempt) => {
attempt.finish(props)
})
}
getAttemptByIndex (attemptIndex: number) {
if (attemptIndex >= this.attempts.length) return
return this.attempts[attemptIndex || 0]
}
commandMatchingErr () {
return _(this.hooks)
.map((hook) => {
return hook.commandMatchingErr(this.err)
})
.compact()
.last()
return this.lastAttempt.commandMatchingErr()
}
_addAttempt = (props: TestProps) => {
props.invocationDetails = this.invocationDetails
props.hooks = this.hooks
const attempt = new Attempt(props, this)
this.attempts.push(attempt)
return attempt
}
_withAttempt<T> (attemptIndex: number, cb: (attempt: Attempt) => T) {
const attempt = this.getAttemptByIndex(attemptIndex)
if (attempt) return cb(attempt)
return null
}
}
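
The model now owns the open/closed decision that the <Test /> component previously computed (see the removed _shouldBeOpen in the next diff). Restated as a standalone function over stub fields - a paraphrase of the isOpen computed above, not new behavior:

interface OpenState {
  userToggled: boolean | null // the _isOpen field; an explicit user choice wins
  state: string
  isLongRunning: boolean
  isActive: boolean
  hasMultipleAttempts: boolean
  isOpenWhenActive: boolean | null
  hasSingleTest: boolean
}

const isOpen = (test: OpenState): boolean => {
  if (test.userToggled !== null) return test.userToggled

  // auto-open failed tests, long-running tests, active tests that are
  // retrying (or that the driver asked to open), and single-test runs
  return test.state === 'failed'
    || test.isLongRunning
    || (test.isActive && (test.hasMultipleAttempts || Boolean(test.isOpenWhenActive)))
    || test.hasSingleTest
}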


@@ -1,24 +1,22 @@
import _ from 'lodash'
import React from 'react'
import { mount, shallow, ReactWrapper } from 'enzyme'
import { shallow, mount, ReactWrapper } from 'enzyme'
import sinon, { SinonSpy } from 'sinon'
import Hooks from '../hooks/hooks'
import Test, { NoCommands } from './test'
import TestModel from './test-model'
import _ from 'lodash'
import Test from './test'
import TestModel, { TestState } from './test-model'
import { Scroller } from '../lib/scroller'
import { AppState } from '../lib/app-state'
const appStateStub = (props?: Partial<AppState>) => {
return _.extend<AppState>({
return {
autoScrollingEnabled: true,
isRunning: true,
}, props)
...props,
} as AppState
}
const model = (props?: Partial<TestModel>) => {
return _.extend<TestModel>({
return {
agents: [],
commands: [],
hooks: [],
@@ -30,8 +28,10 @@ const model = (props?: Partial<TestModel>) => {
shouldRender: true,
state: 'passed',
title: 'some title',
type: 'test',
}, props)
callbackAfterUpdate: () => {},
toggleOpen: sinon.stub(),
...props,
} as any
}
type ScrollerStub = Scroller & {
@@ -42,6 +42,8 @@ const scrollerStub = () => ({
scrollIntoView: sinon.spy(),
} as ScrollerStub)
const setTestState = (test:TestModel, state:TestState) => _.extend(test, { state })
describe('<Test />', () => {
it('does not render when it should not render', () => {
const component = shallow(<Test model={model({ shouldRender: false })} />)
@@ -49,88 +51,18 @@ describe('<Test />', () => {
expect(component).to.be.empty
})
context('open/closed', () => {
it('renders without is-open class by default', () => {
const component = mount(<Test model={model()} />)
expect(component.find('.collapsible').first()).not.to.have.className('is-open')
})
it('renders with is-open class when the model state is failed', () => {
const component = mount(<Test model={model({ state: 'failed' })} />)
expect(component.find('.collapsible').first()).to.have.className('is-open')
})
it('renders with is-open class when the model is long running', () => {
const component = mount(<Test model={model({ isLongRunning: true })} />)
expect(component.find('.collapsible').first()).to.have.className('is-open')
})
it('renders with is-open class when there is only one test', () => {
const component = mount(<Test model={model({ isLongRunning: true })} />)
expect(component.find('.collapsible').first()).to.have.className('is-open')
})
context('toggling', () => {
it('renders without is-open class when already open', () => {
const component = mount(<Test model={model({ state: 'failed' })} />)
component.find('.collapsible-header').first().simulate('click')
expect(component.find('.collapsible').first()).not.to.have.className('is-open')
})
it('renders with is-open class when not already open', () => {
const component = mount(<Test model={model()} />)
component.find('.collapsible-header').first().simulate('click')
expect(component.find('.collapsible').first()).to.have.className('is-open')
})
it('renders without is-open class when toggled again', () => {
const component = mount(<Test model={model()} />)
component.find('.collapsible-header').first().simulate('click')
component.find('.collapsible-header').first().simulate('click')
expect(component.find('.collapsible').first()).not.to.have.className('is-open')
})
})
})
context('contents', () => {
it('does not render the contents if not open', () => {
const component = mount(<Test model={model()} />)
const component = mount(<Test model={model({ isOpen: false })} />)
expect(component.find('.runnable-instruments')).to.be.empty
})
it('renders the contents if open', () => {
const component = mount(<Test model={model({ state: 'failed' })} />)
const component = mount(<Test model={model({ isOpen: true })} />)
expect(component.find('.runnable-instruments')).not.to.be.empty
})
it('renders <Hooks /> if there are commands', () => {
const component = shallow(<Test model={model({ commands: [{ id: 1, hookId: 'h1' }], hooks: [{ hookId: 'h1' }], state: 'failed' } as TestModel)} />)
expect(component.find(Hooks)).to.exist
})
it('renders <NoCommands /> if there are no commands', () => {
const component = shallow(<Test model={model({ state: 'failed' })} />)
expect(component.find(NoCommands)).to.exist
})
it('stops propagation when clicked', () => {
const component = mount(<Test model={model({ state: 'failed' })} />)
const e = { stopPropagation: sinon.spy() }
component.find('.collapsible-header').first().simulate('click', e)
expect(e.stopPropagation).to.have.been.called
})
})
context('scrolling into view', () => {
@@ -195,11 +127,11 @@ describe('<Test />', () => {
expect(scroller.scrollIntoView).not.to.have.been.called
})
it('does not scroll into view if model.isActive is null', () => {
it('does not scroll into view if model.state is processing', () => {
mount(
<Test
appState={appStateStub()}
model={model({ isActive: null })}
model={model({ state: 'processing' })}
scroller={scroller}
/>,
)
@@ -215,30 +147,21 @@ describe('<Test />', () => {
beforeEach(() => {
appState = appStateStub({ autoScrollingEnabled: false, isRunning: false })
testModel = model({ isActive: null })
testModel = model({ state: 'processing' })
component = mount(<Test appState={appState} model={testModel} scroller={scroller} />)
})
it('scrolls into view if auto-scrolling is enabled, app is running, the model should render, and the model.isActive is null', () => {
appState.id = 'fooo'
appState.autoScrollingEnabled = true
appState.isRunning = true
testModel.isActive = true
testModel.shouldRender = true
component.instance()!.componentDidUpdate!({}, {})
expect(scroller.scrollIntoView).to.have.been.calledWith((component.instance() as any).containerRef.current)
})
it('does not scroll into view if auto-scrolling is disabled', () => {
appState.isRunning = true
testModel.isActive = true
setTestState(testModel, 'processing')
component.instance()!.componentDidUpdate!({}, {})
expect(scroller.scrollIntoView).not.to.have.been.called
})
it('does not scroll into view if app is not running', () => {
appState.autoScrollingEnabled = true
testModel.isActive = true
setTestState(testModel, 'processing')
component.instance()!.componentDidUpdate!({}, {})
expect(scroller.scrollIntoView).not.to.have.been.called
})


@@ -1,31 +1,20 @@
import { action, observable } from 'mobx'
import { observer } from 'mobx-react'
import React, { Component, createRef, RefObject } from 'react'
// @ts-ignore
import Tooltip from '@cypress/react-tooltip'
import events, { Events } from '../lib/events'
import appState, { AppState } from '../lib/app-state'
import Collapsible from '../collapsible/collapsible'
import { indent } from '../lib/util'
import runnablesStore, { RunnablesStore } from '../runnables/runnables-store'
import TestModel from './test-model'
import scroller, { Scroller } from '../lib/scroller'
import Hooks from '../hooks/hooks'
import Agents from '../agents/agents'
import Routes from '../routes/routes'
import TestError from '../errors/test-error'
import TestModel from './test-model'
const NoCommands = observer(() => (
<ul className='hooks-container'>
<li className='no-commands'>
No commands were issued in this test.
</li>
</ul>
))
import Attempts from '../attempts/attempts'
interface Props {
events: Events
appState: AppState
runnablesStore: RunnablesStore
scroller: Scroller
@@ -35,13 +24,12 @@ interface Props {
@observer
class Test extends Component<Props> {
static defaultProps = {
events,
appState,
runnablesStore,
scroller,
}
@observable isOpen: boolean | null = null
containerRef: RefObject<HTMLDivElement>
constructor (props: Props) {
@@ -56,19 +44,14 @@ class Test extends Component<Props> {
componentDidUpdate () {
this._scrollIntoView()
-const cb = this.props.model.callbackAfterUpdate
-if (cb) {
-cb()
-}
+this.props.model.callbackAfterUpdate()
}
_scrollIntoView () {
const { appState, model, scroller } = this.props
-const { isActive, shouldRender } = model
+const { state, shouldRender } = model
-if (appState.autoScrollingEnabled && appState.isRunning && shouldRender && isActive != null) {
+if (appState.autoScrollingEnabled && appState.isRunning && shouldRender && state !== 'processing') {
window.requestAnimationFrame(() => {
// since this executes async in a RAF the ref might be null
if (this.containerRef.current) {
@@ -90,7 +73,7 @@ class Test extends Component<Props> {
headerClass='runnable-wrapper'
headerStyle={{ paddingLeft: indent(model.level) }}
contentClass='runnable-instruments'
-isOpen={this._shouldBeOpen()}
+isOpen={model.isOpen}
>
{this._contents()}
</Collapsible>
@@ -119,37 +102,11 @@ class Test extends Component<Props> {
return (
<div style={{ paddingLeft: indent(model.level) }}>
-<Agents model={model} />
-<Routes model={model} />
-<div className='runnable-commands-region'>
-{model.commands.length ? <Hooks model={model} /> : <NoCommands />}
-</div>
-<TestError model={model} />
+<Attempts test={model} scrollIntoView={() => this._scrollIntoView()} />
</div>
)
}
-_shouldBeOpen () {
-// if this.isOpen is non-null, prefer that since the user has
-// explicity chosen to open or close the test
-if (this.isOpen !== null) return this.isOpen
-// otherwise, look at reasons to auto-open the test
-return this.props.model.state === 'failed'
-|| this.props.model.isOpen
-|| this.props.model.isLongRunning
-|| this.props.runnablesStore.hasSingleTest
-}
-@action _toggleOpen = () => {
-if (this.isOpen === null) {
-this.isOpen = !this._shouldBeOpen()
-} else {
-this.isOpen = !this.isOpen
-}
-}
}
-export { NoCommands }
export default Test
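Aside: the removed `_shouldBeOpen` / `_toggleOpen` pair shows what moved where. The component now defers entirely to `model.isOpen`, so the open/closed decision lives on the model. A minimal sketch of that decision at the model level, assuming hypothetical `_userToggledOpen` and `store` names (not the PR's literal code):

// Sketch only: the auto-open rules the component used to own, relocated to the model.
class TestModelSketch {
  constructor (store) {
    this.state = 'processing'
    this.isLongRunning = false
    this.store = store
    this._userToggledOpen = null // set when the user clicks the collapsible header
  }
  toggleOpen () {
    this._userToggledOpen = !this.isOpen
  }
  get isOpen () {
    // an explicit user toggle always wins
    if (this._userToggledOpen !== null) return this._userToggledOpen
    // otherwise auto-open failed, long-running, or lone tests
    return this.state === 'failed' || this.isLongRunning || this.store.hasSingleTest
  }
}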

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,7 +1,7 @@
const helpers = require('../support/helpers')
const _ = Cypress._
-const { runIsolatedCypress } = helpers.createCypress()
+const { runIsolatedCypress } = helpers.createCypress({ config: { isTextTerminal: true, retries: 0 } })
export const verifyFailure = (options) => {
const {


@@ -0,0 +1,338 @@
const helpers = require('../support/helpers')
const { shouldHaveTestResults, getRunState, cleanseRunStateMap } = helpers
const { runIsolatedCypress, snapshotMochaEvents, getAutCypress } = helpers.createCypress({ config: { retries: 2, isTextTerminal: true } })
const { sinon } = Cypress
const match = Cypress.sinon.match
const threeTestsWithRetry = {
suites: {
'suite 1': {
hooks: ['before', 'beforeEach', 'afterEach', 'after'],
tests: [
'test 1',
{ name: 'test 2', fail: 2 },
'test 3',
],
},
},
}
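In this helper DSL a numeric `fail` means the runnable fails that many attempts and then passes, so with `retries: 2` the `fail: 2` test above passes on its third attempt. Roughly the spec the fixture expands to (a sketch, not the helper's literal output):

describe('suite 1', () => {
  before(() => {})
  beforeEach(() => {})
  afterEach(() => {})
  after(() => {})
  it('test 1', () => {})
  let failures = 2
  it('test 2', () => {
    // the closure survives across attempts: fails on attempts 1 and 2, passes on 3
    if (failures--) throw new Error('test fail: test 2')
  })
  it('test 3', () => {})
})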
describe('src/cypress/runner retries mochaEvents', () => {
// NOTE: for test-retries
it('can set retry config', () => {
runIsolatedCypress({}, { config: { retries: 1 } })
.then(({ autCypress }) => {
expect(autCypress.config()).to.has.property('retries', 1)
})
})
it('simple retry', () => {
runIsolatedCypress({
suites: {
'suite 1': {
tests: [
{ name: 'test 1',
fail: 1,
},
],
},
},
}, { config: { retries: 1 } })
.then(snapshotMochaEvents)
})
it('test retry with hooks', () => {
runIsolatedCypress({
suites: {
'suite 1': {
hooks: ['before', 'beforeEach', 'afterEach', 'after'],
tests: [{ name: 'test 1', fail: 1 }],
},
},
}, { config: { retries: 1 } })
.then(snapshotMochaEvents)
})
it('test retry with [only]', () => {
runIsolatedCypress({
suites: {
'suite 1': {
hooks: ['before', 'beforeEach', 'afterEach', 'after'],
tests: [
{ name: 'test 1' },
{ name: 'test 2', fail: 1, only: true },
{ name: 'test 3' },
],
},
},
}, { config: { retries: 1 } })
.then(snapshotMochaEvents)
})
it('can retry from [beforeEach]', () => {
runIsolatedCypress({
suites: {
'suite 1': {
hooks: [
'before',
'beforeEach',
{ type: 'beforeEach', fail: 1 },
'beforeEach',
'afterEach',
'after',
],
tests: [{ name: 'test 1' }],
},
},
}, { config: { retries: 1 } })
.then(snapshotMochaEvents)
})
it('can retry from [afterEach]', () => {
runIsolatedCypress({
hooks: [{ type: 'afterEach', fail: 1 }],
suites: {
'suite 1': {
hooks: [
'before',
'beforeEach',
'beforeEach',
'afterEach',
'after',
],
tests: [{ name: 'test 1' }, 'test 2', 'test 3'],
},
'suite 2': {
hooks: [{ type: 'afterEach', fail: 2 }],
tests: ['test 1'],
},
'suite 3': {
tests: ['test 1'],
},
},
}, { config: { retries: 2, isTextTerminal: true } })
.then(snapshotMochaEvents)
})
it('cant retry from [before]', () => {
runIsolatedCypress({
suites: {
'suite 1': {
hooks: [
{ type: 'before', fail: 1 },
'beforeEach',
'beforeEach',
'afterEach',
'afterEach',
'after',
],
tests: [{ name: 'test 1' }],
},
},
}, { config: { retries: 1 } })
.then(snapshotMochaEvents)
})
it('cant retry from [after]', () => {
runIsolatedCypress({
suites: {
'suite 1': {
hooks: [
'before',
'beforeEach',
'beforeEach',
'afterEach',
'afterEach',
{ type: 'after', fail: 1 },
],
tests: [{ name: 'test 1' }],
},
},
}, { config: { retries: 1 } })
.then(snapshotMochaEvents)
})
it('three tests with retry', () => {
runIsolatedCypress(threeTestsWithRetry, {
config: {
retries: 2,
},
})
.then(snapshotMochaEvents)
})
describe('screenshots', () => {
it('retry screenshot in test body', () => {
let onAfterScreenshot
runIsolatedCypress({
suites: {
'suite 1': {
tests: [
{
name: 'test 1',
fn: () => {
cy.screenshot()
cy.then(() => assert(false))
},
eval: true,
},
],
},
},
}, { config: { retries: 1 },
onBeforeRun ({ autCypress }) {
autCypress.Screenshot.onAfterScreenshot = cy.stub()
onAfterScreenshot = cy.stub()
autCypress.on('after:screenshot', onAfterScreenshot)
},
})
.then(({ autCypress }) => {
expect(autCypress.automation.withArgs('take:screenshot')).callCount(4)
expect(autCypress.automation.withArgs('take:screenshot').args).matchDeep([
{ 1: { testAttemptIndex: 0 } },
{ 1: { testAttemptIndex: 0 } },
{ 1: { testAttemptIndex: 1 } },
{ 1: { testAttemptIndex: 1 } },
])
expect(autCypress.automation.withArgs('take:screenshot').args[0]).matchSnapshot({ startTime: match.string, testAttemptIndex: match(0) })
expect(onAfterScreenshot.args[0][0]).to.matchSnapshot({ testAttemptIndex: match(0) })
expect(onAfterScreenshot.args[2][0]).to.matchDeep({ testAttemptIndex: 1 })
})
})
it('retry screenshot in hook', () => {
let onAfterScreenshot
runIsolatedCypress({
suites: {
'suite 1': {
hooks: [
{
type: 'beforeEach',
fn: () => {
cy.screenshot()
cy.then(() => assert(false))
},
eval: true,
},
],
tests: [
{
name: 'test 1',
},
],
},
},
}, { config: { retries: 1 },
onBeforeRun ({ autCypress }) {
autCypress.Screenshot.onAfterScreenshot = cy.stub()
onAfterScreenshot = cy.stub()
autCypress.on('after:screenshot', onAfterScreenshot)
},
})
.then(({ autCypress }) => {
expect(autCypress.automation.withArgs('take:screenshot')).callCount(4)
expect(autCypress.automation.withArgs('take:screenshot').args).matchDeep([
{ 1: { testAttemptIndex: 0 } },
{ 1: { testAttemptIndex: 0 } },
{ 1: { testAttemptIndex: 1 } },
{ 1: { testAttemptIndex: 1 } },
])
expect(onAfterScreenshot.args[0][0]).matchDeep({ testAttemptIndex: 0 })
expect(onAfterScreenshot.args[2][0]).matchDeep({ testAttemptIndex: 1 })
})
})
})
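These assertions pin down the retry screenshot contract: each failing attempt takes its own failure screenshot, and every `after:screenshot` payload carries a `testAttemptIndex`. A sketch of consuming that from a spec (`testAttemptIndex` comes from the assertions above; `details.path` is an assumption):

Cypress.on('after:screenshot', (details) => {
  // testAttemptIndex is 0 for the first attempt, 1 for the first retry, ...
  console.log(`screenshot from attempt ${details.testAttemptIndex + 1}`, details.path)
})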
describe('save/reload state', () => {
const serializeState = () => {
return getRunState(getAutCypress())
}
const loadStateFromSnapshot = (cypressConfig, name) => {
cy.task('getSnapshot', {
file: Cypress.spec.name,
exactSpecName: name,
})
.then((state) => {
cypressConfig[1].state = state
})
}
// NOTE: for test-retries
describe('retries rehydrate spec state after navigation', () => {
let realState
let runCount = 0
const failThenSerialize = () => {
if (!runCount++) {
assert(false, 'stub 3 fail')
}
assert(true, 'stub 3 pass')
return realState = serializeState()
}
let runCount2 = 0
const failOnce = () => {
if (!runCount2++) {
assert(false, 'stub 2 fail')
}
assert(true, 'stub 2 pass')
}
const stub1 = sinon.stub()
const stub2 = sinon.stub().callsFake(failOnce)
const stub3 = sinon.stub().callsFake(failThenSerialize)
let cypressConfig = [
{
suites: {
'suite 1': {
hooks: [
'before',
'beforeEach',
'afterEach',
'after',
],
tests: [{ name: 'test 1', fn: stub1 }],
},
'suite 2': {
tests: [
{ name: 'test 1', fn: stub2 },
{ name: 'test 2', fn: stub3 },
'test 3',
],
},
},
}, { config: { retries: 1 } },
]
it('1/2', () => {
runIsolatedCypress(...cypressConfig)
.then(shouldHaveTestResults(4, 0))
.then(() => {
expect(realState).to.matchSnapshot(cleanseRunStateMap, 'serialize state - retries')
})
})
it('2/2', () => {
loadStateFromSnapshot(cypressConfig, 'serialize state - retries')
runIsolatedCypress(...cypressConfig)
.then(shouldHaveTestResults(4, 0))
.then(() => {
expect(stub1).to.calledOnce
expect(stub2).to.calledTwice
expect(stub3).calledThrice
})
})
})
})
})
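The `1/2` / `2/2` pair above emulates a top-level navigation mid-run: the first test serializes the runner's state into a named snapshot, and the second boots the isolated runner with that state so already-passed tests and spent attempts rehydrate instead of re-running, which is why the stubs' call counts only advance by the retried attempts. Condensed, the hand-off looks like this (names from the spec above; `suites` stands in for the same suite definition):

// run 1: capture the serialized run state once the isolated tests finish
const state = getRunState(getAutCypress())
// run 2: boot the isolated runner with that state restored,
// as the real runner does after a top-level navigation
runIsolatedCypress(suites, { config: { retries: 1 }, state })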


@@ -0,0 +1,491 @@
const helpers = require('../support/helpers')
const { shouldHaveTestResults, containText } = helpers
const { runIsolatedCypress } = helpers.createCypress({ config: { retries: 2 } })
const getAttemptTag = (sel) => {
return cy.get(`.test.runnable:contains(${sel}) .attempt-tag`)
}
const shouldBeOpen = ($el) => cy.wrap($el).parentsUntil('.collapsible').last().parent().should('have.class', 'is-open')
const attemptTag = (sel) => `.attempt-tag:contains(Attempt ${sel})`
const cyReject = (fn) => {
return () => {
try {
fn()
} catch (e) {
cy.state('reject')(e)
}
}
}
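`cyReject` exists because these callbacks fire inside the isolated AUT runner, outside the outer test's command chain; an assertion thrown there would otherwise be swallowed, so it is routed to the outer test via `cy.state('reject')`. Typical usage (mirroring the screenshot test below):

// fail the outer test from inside an AUT-side callback
const onScreenshot = cy.stub().callsFake(cyReject(() => {
  expect(cy.$$('.attempt-tag')).to.have.length(2)
}))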
describe('runner/cypress retries.ui.spec', { viewportWidth: 600, viewportHeight: 900 }, () => {
it('collapses tests that retry and pass', () => {
runIsolatedCypress({
suites: {
'suite 1': {
tests: [
{ name: 'test pass', fail: 0 },
{ name: 'test pass on 2nd attempt', fail: 1 },
],
},
},
})
.then(shouldHaveTestResults(2, 0))
cy.percySnapshot()
})
it('collapses prev attempts and keeps final one open on failure', () => {
runIsolatedCypress({
suites: {
'suite 1': {
tests: [
{ name: 'test 1',
fail: true,
},
{ name: 'test 2',
},
],
},
},
}, { config: { retries: 2 } })
.then(shouldHaveTestResults(1, 1))
cy.percySnapshot()
})
it('can toggle failed prev attempt open and log its error', () => {
runIsolatedCypress({
suites: {
'suite 1': {
tests: [
{ name: 'test 1', fail: 1 },
{ name: 'test 2', fail: 2 },
{ name: 'test 3', fail: 1 },
],
},
},
}, { config: { retries: 1 } })
.then(shouldHaveTestResults(2, 1))
.then(() => {
cy.contains('Attempt 1')
.click()
.closest('.attempt-item')
.find('.runnable-err-print')
.click()
cy.get('@console_error').should('be.calledWithMatch', 'AssertionError: test 2')
})
cy.percySnapshot()
})
it('opens attempt on each attempt failure for the screenshot, and closes after test passes', { retries: 2 }, () => {
let stub
runIsolatedCypress(
{
suites: {
's1': {
tests: [
't1',
{
name: 't2',
fail: 3,
},
't3',
],
},
},
}, { config: { retries: 3, isTextTerminal: true },
onBeforeRun ({ autCypress }) {
let attempt = 0
stub = cy.stub().callsFake(cyReject(() => {
attempt++
const $attemptCollapsible = cy.$$(attemptTag(attempt))
.parentsUntil('.collapsible').last().parent()
expect($attemptCollapsible).have.class('is-open')
}))
autCypress.Screenshot.onAfterScreenshot = stub
},
},
).then(() => {
expect(stub).callCount(3)
cy.get('.test.runnable:contains(t2)').then(($el) => {
expect($el).not.class('is-open')
})
})
})
it('includes routes, spies, hooks, and commands in attempt', () => {
runIsolatedCypress({
suites: {
's1': {
hooks: [{ type: 'beforeEach', fail: 1, agents: true }],
tests: [{ name: 't1', fail: 1, agents: true }],
},
},
})
.then(shouldHaveTestResults(1, 0))
.then(() => {
cy.get(attemptTag(1)).click().parentsUntil('.collapsible').last().parent().within(() => {
cy.get('.instruments-container').should('contain', 'Spies / Stubs (1)')
cy.get('.instruments-container').should('contain', 'Routes (1)')
cy.get('.runnable-err').should('contain', 'AssertionError')
})
cy.get(attemptTag(2)).click().parentsUntil('.collapsible').last().parent().within(() => {
cy.get('.instruments-container').should('contain', 'Spies / Stubs (2)')
cy.get('.instruments-container').should('contain', 'Routes (2)')
cy.get('.runnable-err').should('contain', 'AssertionError')
})
cy.get(attemptTag(3)).parentsUntil('.collapsible').last().parent().within(() => {
cy.get('.instruments-container').should('contain', 'Spies / Stubs (2)')
cy.get('.instruments-container').should('contain', 'Routes (2)')
cy.get('.runnable-err').should('not.contain', 'AssertionError')
})
})
cy.percySnapshot()
})
describe('only', () => {
it('test retry with [only]', () => {
runIsolatedCypress({
suites: {
'suite 1': {
hooks: ['before', 'beforeEach', 'afterEach', 'after'],
tests: [
{ name: 'test 1' },
{ name: 'test 2', fail: 1, only: true },
{ name: 'test 3' },
],
},
},
}, { config: { retries: 1 } })
.then(shouldHaveTestResults(1, 0))
.then(() => {
cy.contains('test 2')
cy.contains('test 1').should('not.exist')
cy.contains('test 3').should('not.exist')
})
cy.percySnapshot()
})
})
describe('beforeAll', () => {
// TODO: make beforeAll hooks retry
it('tests do not retry when beforeAll fails', () => {
runIsolatedCypress({
suites: {
'suite 1': {
hooks: [
{ type: 'before', fail: 1 },
'beforeEach',
'beforeEach',
'afterEach',
'afterEach',
'after',
],
tests: ['test 1'],
},
},
}, { config: { retries: 1 } })
.then(shouldHaveTestResults(0, 1))
.then(() => {
cy.contains('Although you have test retries')
})
cy.percySnapshot()
})
// TODO: future versions should run all hooks associated with test on retry
it('before all hooks are not run on the second attempt when fails outside of beforeAll', () => {
runIsolatedCypress({
suites: {
'suite 1': {
hooks: ['before', 'beforeEach', 'afterEach', 'after'],
tests: [{ name: 'test 1', fail: 1 }],
},
},
}, { config: { retries: 1 } })
.then(shouldHaveTestResults(1, 0))
.then(() => {
cy.contains('test')
cy.contains('after all')
cy.contains('before all').should('not.exist')
})
cy.percySnapshot()
})
})
describe('beforeEach', () => {
it('beforeEach hooks retry on failure, but only run same-level afterEach hooks', () => {
runIsolatedCypress({
hooks: [{ type: 'beforeEach', fail: 1 }],
suites: {
'suite 1': {
hooks: [
'before',
'beforeEach',
{ type: 'beforeEach', fail: 1 },
'beforeEach',
'afterEach',
'after',
],
tests: [{ name: 'test 1' }],
},
},
}, { config: { retries: 2 } })
.then(shouldHaveTestResults(1, 0))
.then(() => {
cy.contains('Attempt 1').click()
cy.get('.attempt-1 .hook-item .collapsible:contains(before each)').find('.command-state-failed')
cy.get('.attempt-1 .hook-item .collapsible:contains(before each (2))').should('not.exist')
cy.get('.attempt-1 .hook-item .collapsible:contains(test body)').should('not.exist')
cy.get('.attempt-1 .hook-item .collapsible:contains(after each)').should('not.exist')
cy.get('.attempt-1 .hook-item .collapsible:contains(after all)').should('not.exist')
cy.contains('Attempt 2').click()
cy.get('.attempt-2 .hook-item .collapsible:contains(before each)')
cy.get('.attempt-2 .hook-item .collapsible:contains(before each (2))')
cy.get('.attempt-2 .hook-item .collapsible:contains(before each (3))').find('.command-state-failed')
cy.get('.attempt-2 .hook-item .collapsible:contains(test body)').should('not.exist')
cy.get('.attempt-2 .hook-item .collapsible:contains(after each)')
cy.get('.attempt-2 .hook-item .collapsible:contains(after all)').should('not.exist')
cy.get('.attempt-3 .hook-item .collapsible:contains(before each)')
cy.get('.attempt-3 .hook-item .collapsible:contains(before each (2))')
cy.get('.attempt-3 .hook-item .collapsible:contains(before each (3))')
cy.get('.attempt-3 .hook-item .collapsible:contains(before each (4))')
cy.get('.attempt-3 .hook-item .collapsible:contains(test body)')
cy.get('.attempt-3 .hook-item .collapsible:contains(after each)')
cy.get('.attempt-3 .hook-item .collapsible:contains(after all)')
})
cy.percySnapshot()
})
it('beforeEach retried tests skip remaining tests in suite', () => {
runIsolatedCypress({ suites: {
'beforeEach hooks': {
hooks: [{ type: 'beforeEach', fail: true }],
tests: ['fails in beforeEach', 'skips this'],
},
} }, { config: { retries: 1 } })
.then(shouldHaveTestResults(0, 1, 0))
cy.percySnapshot()
})
})
describe('afterEach', () => {
it('afterEach hooks retry on failure, but only run higher-level afterEach hooks', () => {
runIsolatedCypress({
hooks: [{ type: 'afterEach', fail: 2 }],
suites: {
's1': {
hooks: [{ type: 'afterEach', fail: 1 }, 'afterEach', 'after'],
tests: ['t1'],
},
},
}, { config: { retries: 2 } })
.then(shouldHaveTestResults(1, 0))
.then(() => {
cy.contains('Attempt 1')
.click()
.then(shouldBeOpen)
cy.get('.attempt-1 .hook-item .collapsible:contains(after each (1))').find('.command-state-failed')
cy.get('.attempt-1 .hook-item .collapsible:contains(after each (2))')
cy.get('.attempt-1 .hook-item .collapsible:contains(after each (3))').should('not.exist')
cy.get('.attempt-1 .hook-item .collapsible:contains(after all)').should('not.exist')
cy.contains('Attempt 2').click()
.then(shouldBeOpen)
cy.get('.attempt-2 .hook-item .collapsible:contains(after each (1))')
cy.get('.attempt-2 .hook-item .collapsible:contains(after each (2))')
cy.get('.attempt-2 .hook-item .collapsible:contains(after each (3))').find('.command-state-failed')
cy.get('.attempt-2 .hook-item .collapsible:contains(after all)').should('not.exist')
cy.get('.attempt-tag').should('have.length', 3)
cy.get('.attempt-2 .hook-item .collapsible:contains(after each (1))')
cy.get('.attempt-2 .hook-item .collapsible:contains(after each (2))')
cy.get('.attempt-2 .hook-item .collapsible:contains(after each (3))')
cy.get('.attempt-3 .hook-item .collapsible:contains(after all)')
})
cy.percySnapshot()
})
it('afterEach retried tests skip remaining tests in suite', () => {
runIsolatedCypress({ suites: {
'afterEach hooks': {
hooks: [{ type: 'afterEach', fail: true }],
tests: ['fails in afterEach', 'skips this'],
},
} }, { config: { retries: 1 } })
.then(shouldHaveTestResults(0, 1, 0))
cy.percySnapshot()
})
})
describe('afterAll', () => {
it('only run afterAll hook on last attempt', () => {
runIsolatedCypress({
suites: {
'suite 1': {
hooks: [
'before',
'beforeEach',
'afterEach',
'after',
],
tests: [
{ name: 'test 1' },
{ name: 'test 2' },
{ name: 'test 3', fail: 1 },
],
},
},
}, { config: { retries: 1 } })
.then(shouldHaveTestResults(3, 0))
.then(() => {
cy.contains('test 3').click()
getAttemptTag('test 3').first().click()
cy.contains('.attempt-1', 'after all').should('not.exist')
cy.contains('.attempt-2', 'after all')
})
})
it('tests do not retry when afterAll fails', () => {
runIsolatedCypress({
suites: {
'suite 1': {
hooks: [
'before',
'beforeEach',
'beforeEach',
'afterEach',
'afterEach',
{ type: 'after', fail: 1 },
],
tests: [{ name: 'test 1' }],
},
},
}, { config: { retries: 1 } })
.then(shouldHaveTestResults(0, 1))
.then(() => {
cy.contains('Although you have test retries')
cy.get('.runnable-err-print').click()
cy.get('@console_error').its('lastCall').should('be.calledWithMatch', 'Error')
})
cy.percySnapshot()
})
})
describe('can configure retries', () => {
const haveCorrectError = ($el) => cy.wrap($el).last().parentsUntil('.collapsible').last().parent().find('.runnable-err').should('contain', 'Unspecified AssertionError')
it('via config value', () => {
runIsolatedCypress({
suites: {
'suite 1': () => {
it('[no retry]', { retries: 0 }, () => assert(false))
it('[1 retry]', { retries: 1 }, () => assert(false))
it('[2 retries]', { retries: 2 }, () => assert(false))
it('[open mode, no retry]', { retries: { runMode: 2, openMode: 0 } }, () => assert(false))
it('[run mode, retry]', { retries: { runMode: 1, openMode: 0 }, isInteractive: false }, () => assert(false))
it('[open mode, 2 retries]', { isInteractive: true }, () => assert(false))
describe('suite 2', { retries: 1 }, () => {
it('[set retries on suite]', () => assert(false))
})
},
},
})
.then(shouldHaveTestResults(0, 7))
.then(() => {
getAttemptTag('[no retry]').should('have.length', 1).then(haveCorrectError)
getAttemptTag('[1 retry]').should('have.length', 2).then(haveCorrectError)
getAttemptTag('[2 retries]').should('have.length', 3).then(haveCorrectError)
getAttemptTag('[open mode, no retry]').should('have.length', 1).then(haveCorrectError)
getAttemptTag('[run mode, retry]').should('have.length', 2).then(haveCorrectError)
getAttemptTag('[open mode, 2 retries]').should('have.length', 3).then(haveCorrectError)
getAttemptTag('[set retries on suite]').should('have.length', 2).then(haveCorrectError)
})
})
it('throws when set via this.retries in test', () => {
runIsolatedCypress({
suites: {
'suite 1' () {
it('tries to set mocha retries', function () {
this.retries(null)
})
},
},
})
.then(shouldHaveTestResults(0, 1))
.then(() => {
cy.get('.runnable-err').should(containText(`it('tries to set mocha retries', { retries: 2 }, () => `))
})
cy.percySnapshot()
})
it('throws when set via this.retries in hook', () => {
runIsolatedCypress({
suites: {
'suite 1' () {
beforeEach(function () {
this.retries(0)
})
it('foo', () => {})
},
},
})
.then(shouldHaveTestResults(0, 1))
.then(() => {
cy.get('.runnable-err').should(containText(`describe('suite 1', { retries: 0 }, () => `))
})
cy.percySnapshot()
})
it('throws when set via this.retries in suite', () => {
runIsolatedCypress({
suites: {
'suite 1' () {
this.retries(3)
it('test 1', () => {
})
},
},
})
.then(shouldHaveTestResults(0, 1))
.then(() => {
cy.get('.runnable-err')
.should(containText(`describe('suite 1', { retries: 3 }, () => `))
})
cy.percySnapshot()
})
})
})
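Taken together, the suite above exercises the whole user-facing configuration surface: `retries` may be a number or a `{ runMode, openMode }` object, set globally, per suite, or per test, while mocha's `this.retries()` is rejected with a migration hint. In user code that surface looks like this (a usage sketch consistent with the tests above):

// cypress.json
// { "retries": 1 }  or  { "retries": { "runMode": 2, "openMode": 0 } }

describe('flaky suite', { retries: 1 }, () => {
  // a per-test value overrides the suite's
  it('flaky test', { retries: { runMode: 2, openMode: 0 } }, () => {
    // ...
  })
})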


@@ -3,7 +3,7 @@ const sinon = require('sinon')
const helpers = require('../support/helpers')
const { cleanseRunStateMap, shouldHaveTestResults, getRunState } = helpers
-const { runIsolatedCypress, snapshotMochaEvents, onInitialized, getAutCypress } = helpers.createCypress()
+const { runIsolatedCypress, snapshotMochaEvents, getAutCypress } = helpers.createCypress({ config: { isTextTerminal: true, retries: 0 } })
const simpleSingleTest = {
suites: { 'suite 1': { tests: [{ name: 'test 1' }] } },
@@ -239,17 +239,24 @@ describe('src/cypress/runner', () => {
})
})
-describe('screenshots', () => {
-let onAfterScreenshotListener
-beforeEach(() => {
-onInitialized((autCypress) => {
-autCypress.Screenshot.onAfterScreenshot = cy.stub()
-onAfterScreenshotListener = cy.stub()
-autCypress.on('after:screenshot', onAfterScreenshotListener)
-})
+it('buffer mocha pass event when fail in afterEach hooks', () => {
+runIsolatedCypress({
+suites: {
+'suite 1': {
+suites: {
+'suite 1-1': {
+hooks: [{ type: 'afterEach', fail: true }],
+tests: ['test 1'],
+},
+},
+},
+},
+}).then(({ mochaStubs }) => {
+expect(_.find(mochaStubs.args, { 1: 'pass' })).not.exist
+})
+})
+describe('screenshots', () => {
it('screenshot after failed test', () => {
runIsolatedCypress({
suites: {


@@ -147,6 +147,31 @@ describe('src/cypress/runner', () => {
describe('hook failures', () => {
describe('test failures w/ hooks', () => {
it('test [only]', () => {
runIsolatedCypress({
suites: {
'suite 1': {
hooks: ['before', 'beforeEach', 'afterEach', 'after'],
tests: [
{ name: 'test 1' },
{ name: 'test 2', only: true },
{ name: 'test 3' },
],
},
},
}).then(shouldHaveTestResults(1, 0))
})
it('test [pending]', () => {
runIsolatedCypress(() => {
before(() => {})
it('t1')
it('t2')
it('t3')
after(() => {})
}).then(shouldHaveTestResults(0, 0, 3))
})
it('fail with [before]', () => {
runIsolatedCypress({
suites: {
@@ -263,5 +288,31 @@ describe('src/cypress/runner', () => {
})
.then(shouldHaveTestResults(0, 1))
})
it('scrolls each command into view', () => {
// HACK to assert on the dom DURING the runIsolatedCypress run
// we expect the last command item to be scrolled into view before
// the test ends
cy.now('get', '.command-number:contains(25)')
.then(($el) => {
return new Promise((resolve) => {
requestAnimationFrame(() => {
expect($el).visible
resolve()
})
})
})
.catch((e) => cy.state('reject')(e))
runIsolatedCypress(() => {
describe('s1', () => {
// eslint-disable-next-line
it('t1', (done) => {
cy.timeout(10)
Cypress._.times(25, () => expect(true).ok)
})
})
})
})
})
})


@@ -1,11 +1,14 @@
// static file server that serves fixtures needed for testing
require('@packages/driver/cypress/plugins/server')
const { getSnapshot, saveSnapshot } = require('./snapshot/snapshotPlugin')
const percyHealthCheck = require('@percy/cypress/task')
/**
* @type {Cypress.PluginConfig}
*/
module.exports = (on) => {
on('task', percyHealthCheck)
on('task', {
getSnapshot,
saveSnapshot,
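These two tasks back the snapshot round-trip used by the save/reload tests: the browser asks the plugins process for a previously saved snapshot by spec file and name. Consumption looks like this (taken from the spec above; the resolved value is the serialized run state):

cy.task('getSnapshot', {
  file: Cypress.spec.name,
  exactSpecName: 'serialize state - retries',
}).then((state) => {
  // pass `state` into the next runIsolatedCypress call to rehydrate it
})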


@@ -21,7 +21,7 @@ function throwErr (e, message, exp, ctx) {
}
}
-function getMatchDeepMessage ({ act, exp }) {
+function getMatchDeepMessage (act, exp) {
return `Expected **${chai.util.objDisplay(act)}** to deep match: **${chai.util.objDisplay(exp)}**`
}


@@ -37,6 +37,18 @@ const mochaEventCleanseMap = {
end: match.date,
}
const cleanseRunStateMap = {
...eventCleanseMap,
'err.stack': '[err stack]',
wallClockStartedAt: new Date(0),
wallClockDuration: 1,
fnDuration: 1,
afterFnDuration: 1,
lifecycle: 1,
duration: 1,
startTime: new Date(0),
}
const spyOn = (obj, prop, fn) => {
const _fn = obj[prop]
@@ -49,7 +61,7 @@ const spyOn = (obj, prop, fn) => {
}
}
-function createCypress () {
+function createCypress (defaultOptions = {}) {
/**
* @type {sinon.SinonStub}
*/
@@ -84,19 +96,13 @@ function createCypress () {
window.Cypress = backupCypress
})
-let onInitializedListeners = []
-const onInitialized = function (fn) {
-onInitializedListeners.push(fn)
-}
/**
* Spawns an isolated Cypress runner as the AUT, with provided spec/fixture and optional state/config
* @param {string | ()=>void | {[key:string]: any}} mochaTestsOrFile
* @param {{state?: any, config?: any}} opts
*/
const runIsolatedCypress = (mochaTestsOrFile, opts = {}) => {
-_.defaultsDeep(opts, {
+_.defaultsDeep(opts, defaultOptions, {
state: {},
config: { video: false },
onBeforeRun () {},
@@ -106,9 +112,9 @@ function createCypress () {
.then({ timeout: 60000 }, (win) => {
win.runnerWs.destroy()
-allStubs = cy.stub().snapshot(enableStubSnapshots)
-mochaStubs = cy.stub().snapshot(enableStubSnapshots)
-setRunnablesStub = cy.stub().snapshot(enableStubSnapshots)
+allStubs = cy.stub().snapshot(enableStubSnapshots).log(false)
+mochaStubs = cy.stub().snapshot(enableStubSnapshots).log(false)
+setRunnablesStub = cy.stub().snapshot(enableStubSnapshots).log(false)
return new Promise((resolve) => {
const runIsolatedCypress = () => {
@@ -118,7 +124,7 @@ function createCypress () {
const emitMap = autCypress.emitMap
const emitThen = autCypress.emitThen
-cy.stub(autCypress, 'automation').snapshot(enableStubSnapshots)
+cy.stub(autCypress, 'automation').log(false).snapshot(enableStubSnapshots)
.callThrough()
.withArgs('clear:cookies')
.resolves({
@@ -177,7 +183,9 @@ function createCypress () {
spyOn(autCypress.mocha.getRunner(), 'fail', (...args) => {
Cypress.log({
-name: 'Runner Fail',
+name: 'Runner (fail event)',
+ended: true,
+event: true,
message: `${args[1]}`,
state: 'failed',
consoleProps: () => {
@@ -191,28 +199,26 @@ function createCypress () {
// TODO: clean this up, sinon doesn't like wrapping things multiple times
// and this catches that error
try {
-cy.spy(cy.state('window').console, 'log').as('console_log')
-cy.spy(cy.state('window').console, 'error').as('console_error')
+cy.spy(cy.state('window').console, 'log').as('console_log').log(false)
+cy.spy(cy.state('window').console, 'error').as('console_error').log(false)
} catch (_e) {
// console was already wrapped, noop
}
-onInitializedListeners.forEach((fn) => fn(autCypress))
-onInitializedListeners = []
autCypress.run((failed) => {
resolve({ failed, mochaStubs, autCypress, win })
})
}
-cy.spy(win.eventManager.reporterBus, 'emit').snapshot(enableStubSnapshots).as('reporterBus')
-cy.spy(win.eventManager.localBus, 'emit').snapshot(enableStubSnapshots).as('localBus')
+cy.spy(win.eventManager.reporterBus, 'emit').snapshot(enableStubSnapshots).log(false).as('reporterBus')
+cy.spy(win.eventManager.localBus, 'emit').snapshot(enableStubSnapshots).log(false).as('localBus')
-cy.stub(win.runnerWs, 'emit').snapshot(enableStubSnapshots)
+cy.stub(win.runnerWs, 'emit').snapshot(enableStubSnapshots).log(false)
.withArgs('watch:test:file')
.callsFake(() => {
autCypress = win.Cypress
-cy.stub(autCypress, 'onSpecWindow').snapshot(enableStubSnapshots).callsFake((specWindow) => {
+cy.stub(autCypress, 'onSpecWindow').snapshot(enableStubSnapshots).log(false).callsFake((specWindow) => {
autCypress.onSpecWindow.restore()
opts.onBeforeRun({ specWindow, win, autCypress })
@@ -238,7 +244,7 @@ function createCypress () {
specWindow.describe = () => {}
})
-cy.stub(autCypress, 'run').snapshot(enableStubSnapshots).callsFake(runIsolatedCypress)
+cy.stub(autCypress, 'run').snapshot(enableStubSnapshots).log(false).callsFake(runIsolatedCypress)
})
.withArgs('is:automation:client:connected')
.yieldsAsync(true)
@@ -271,16 +277,17 @@ function createCypress () {
.yieldsAsync({ response: {} })
const c = _.extend({}, Cypress.config(), {
-isTextTerminal: true,
+isTextTerminal: false,
spec: {
relative: 'relative/path/to/spec.js',
absolute: '/absolute/path/to/spec.js',
name: 'empty_spec.js',
},
}, opts.config)
c.state = {}
-cy.stub(win.runnerWs, 'on').snapshot(enableStubSnapshots)
+cy.stub(win.runnerWs, 'on').snapshot(enableStubSnapshots).log(false)
win.Runner.start(win.document.getElementById('app'), window.btoa(JSON.stringify(c)))
})
@@ -290,7 +297,6 @@ function createCypress () {
return {
runIsolatedCypress,
snapshotMochaEvents,
-onInitialized,
getAutCypress,
}
}
@@ -301,7 +307,7 @@ const createHooks = (win, hooks = []) => {
hook = { type: hook }
}
-let { type, fail, fn } = hook
+let { type, fail, fn, agents } = hook
if (fn) {
if (hook.eval) {
@@ -321,24 +327,34 @@ const createHooks = (win, hooks = []) => {
if (fail) {
const numFailures = fail
-return win[type](() => {
+return win[type](function () {
+const message = `${type} - ${this._runnable.parent.title || 'root'}`
+if (agents) {
+registerAgents(win)
+}
if (_.isNumber(fail) && fail-- <= 0) {
debug(`hook pass after (${numFailures}) failures: ${type}`)
-win.assert(true, type)
+win.assert(true, message)
return
}
-debug(`hook fail: ${type}`)
-win.assert(false, type)
-throw new Error(`hook failed: ${type}`)
+if (agents) {
+failCypressCommand(win, type)
+} else {
+debug(`hook fail: ${type}`)
+win.assert(false, message)
+throw new Error(`hook failed: ${type}`)
+}
})
}
-return win[type](() => {
-win.assert(true, type)
+return win[type](function () {
+win.assert(true, `${type} - ${this._runnable.parent.title || 'root'}`)
debug(`hook pass: ${type}`)
})
})
@@ -350,7 +366,7 @@ const createTests = (win, tests = []) => {
test = { name: test }
}
-let { name, pending, fail, fn, only } = test
+let { name, pending, fail, fn, only, agents } = test
let it = win.it
@@ -379,6 +395,10 @@ const createTests = (win, tests = []) => {
if (fail) {
return it(name, () => {
+if (agents) {
+registerAgents(win)
+}
if (_.isNumber(fail) && fail-- === 0) {
debug(`test pass after retry: ${name}`)
win.assert(true, name)
@@ -386,10 +406,14 @@ const createTests = (win, tests = []) => {
return
}
-debug(`test fail: ${name}`)
-win.assert(false, name)
-throw new Error(`test fail: ${name}`)
+if (agents) {
+failCypressCommand(win, name)
+} else {
+debug(`test fail: ${name}`)
+win.assert(false, name)
+throw new Error(`test fail: ${name}`)
+}
})
}
@@ -400,6 +424,16 @@ const createTests = (win, tests = []) => {
})
}
const failCypressCommand = (win, name) => win.cy.wrap(name).then(() => win.assert(false, name))
const registerAgents = (win) => {
const obj = { foo: 'bar' }
win.cy.stub(obj, 'foo')
win.cy.wrap(obj).should('exist')
win.cy.server()
win.cy.route('https://example.com')
}
const createSuites = (win, suites = {}) => {
_.each(suites, (obj, suiteName) => {
let fn = () => {
@@ -434,27 +468,13 @@ const evalFn = (win, fn) => {
}
}
-const cleanseRunStateMap = {
-wallClockStartedAt: new Date(0),
-wallClockDuration: 1,
-fnDuration: 1,
-afterFnDuration: 1,
-lifecycle: 1,
-duration: 1,
-startTime: new Date(0),
-'err.stack': '[err stack]',
-sourceMappedStack: match.string,
-parsedStack: match.array,
-invocationDetails: stringifyShort,
-}
-const shouldHaveTestResults = (expPassed, expFailed) => {
-return ({ failed }) => {
-expect(failed, 'resolve with failure count').eq(failed)
+const shouldHaveTestResults = (expPassed, expFailed, expPending) => {
+return () => {
expPassed = expPassed || '--'
expFailed = expFailed || '--'
cy.get('header .passed .num').should('have.text', `${expPassed}`)
cy.get('header .failed .num').should('have.text', `${expFailed}`)
+if (expPending) cy.get('header .pending .num').should('have.text', `${expPending}`)
}
}
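Since `createCypress` now accepts suite-level defaults, options are layered with `_.defaultsDeep`: per-call `opts` win over the suite's `defaultOptions`, which win over the baked-in base. A standalone illustration of that precedence (plain lodash, not the helper itself):

const _ = require('lodash')

const opts = { config: { retries: 1 } } // per-call
const suiteDefaults = { config: { retries: 2, isTextTerminal: true } } // per-suite

_.defaultsDeep(opts, suiteDefaults, { state: {}, config: { video: false } })
// opts is now { config: { retries: 1, isTextTerminal: true, video: false }, state: {} }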


@@ -0,0 +1 @@
require('@packages/ui-components/cypress/support/customPercyCommand')


@@ -1,4 +1,4 @@
-exports['e2e caught and uncaught hooks errors failing1 1'] = `
+exports['e2e caught and uncaught hooks errors / failing1'] = `
====================================================================================================
@@ -110,7 +110,7 @@ Because this error occurred during a \`before all\` hook we are skipping the rem
`
-exports['e2e caught and uncaught hooks errors failing2 1'] = `
+exports['e2e caught and uncaught hooks errors / failing2'] = `
====================================================================================================
@@ -203,7 +203,7 @@ Because this error occurred during a \`before each\` hook we are skipping the re
`
-exports['e2e caught and uncaught hooks errors failing3 1'] = `
+exports['e2e caught and uncaught hooks errors / failing3'] = `
====================================================================================================
@@ -287,7 +287,7 @@ Because this error occurred during a \`before each\` hook we are skipping all of
`
-exports['e2e caught and uncaught hooks errors failing4 1'] = `
+exports['e2e caught and uncaught hooks errors / failing4'] = `
====================================================================================================


@@ -311,7 +311,7 @@ exports['e2e plugins calls after:screenshot for cy.screenshot() and failure scre
│ Failing: 1 │
│ Pending: 0 │
│ Skipped: 0 │
-│ Screenshots: 4 │
+│ Screenshots: 3 │
│ Video: true │
│ Duration: X seconds │
│ Spec Ran: after_screenshot_spec.coffee │
@@ -323,7 +323,6 @@ exports['e2e plugins calls after:screenshot for cy.screenshot() and failure scre
- /XXX/XXX/XXX/screenshot-replacement.png (YxX)
- /XXX/XXX/XXX/cypress/screenshots/after_screenshot_spec.coffee/ignored-values.png (YxX)
- /XXX/XXX/XXX/cypress/screenshots/after_screenshot_spec.coffee/invalid-return.png (YxX)
- /XXX/XXX/XXX/screenshot-replacement.png (YxX)
(Video)
@@ -446,3 +445,139 @@ The following are valid events:
[stack trace lines]
`
exports['e2e plugins does not report more screenshots than exist if user overwrites screenshot in afterScreenshot hook 1'] = `
====================================================================================================
(Run Starting)
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
│ Cypress: 1.2.3 │
│ Browser: FooBrowser 88 │
│ Specs: 1 found (after_screenshot_overwrite_spec.coffee) │
│ Searched: cypress/integration/after_screenshot_overwrite_spec.coffee │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
────────────────────────────────────────────────────────────────────────────────────────────────────
Running: after_screenshot_overwrite_spec.coffee (1 of 1)
✓ cy.screenshot() - replacement
✓ cy.screenshot() - replacement
✓ cy.screenshot() - replacement
3 passing
(Results)
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
│ Tests: 3 │
│ Passing: 3 │
│ Failing: 0 │
│ Pending: 0 │
│ Skipped: 0 │
│ Screenshots: 1 │
│ Video: true │
│ Duration: X seconds │
│ Spec Ran: after_screenshot_overwrite_spec.coffee │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
(Screenshots)
- /XXX/XXX/XXX/screenshot-replacement.png (2x2)
(Video)
- Started processing: Compressing to 32 CRF
- Finished processing: /XXX/XXX/XXX/cypress/videos/after_screenshot_overwrite_spec (X second)
.coffee.mp4
====================================================================================================
(Run Finished)
Spec Tests Passing Failing Pending Skipped
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
│ ✔ after_screenshot_overwrite_spec.cof XX:XX 3 3 - - - │
│ fee │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
✔ All specs passed! XX:XX 3 3 - - -
`
exports['e2e plugins does not report more screenshots than exist if user overwrites previous screenshot in afterScreenshot 1'] = `
====================================================================================================
(Run Starting)
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
│ Cypress: 1.2.3 │
│ Browser: FooBrowser 88 │
│ Specs: 1 found (after_screenshot_overwrite_spec.coffee) │
│ Searched: cypress/integration/after_screenshot_overwrite_spec.coffee │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
────────────────────────────────────────────────────────────────────────────────────────────────────
Running: after_screenshot_overwrite_spec.coffee (1 of 1)
✓ cy.screenshot() - replacement
✓ cy.screenshot() - replacement
✓ cy.screenshot() - replacement
3 passing
(Results)
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
│ Tests: 3 │
│ Passing: 3 │
│ Failing: 0 │
│ Pending: 0 │
│ Skipped: 0 │
│ Screenshots: 1 │
│ Video: true │
│ Duration: X seconds │
│ Spec Ran: after_screenshot_overwrite_spec.coffee │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
(Screenshots)
- /XXX/XXX/XXX/screenshot-replacement.png (2x2)
(Video)
- Started processing: Compressing to 32 CRF
- Finished processing: /XXX/XXX/XXX/cypress/videos/after_screenshot_overwrite_spec (X second)
.coffee.mp4
====================================================================================================
(Run Finished)
Spec Tests Passing Failing Pending Skipped
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
│ ✔ after_screenshot_overwrite_spec.cof XX:XX 3 3 - - - │
│ fee │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
✔ All specs passed! XX:XX 3 3 - - -
`


@@ -0,0 +1,134 @@
exports['retries / supports retries'] = `
====================================================================================================
(Run Starting)
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
│ Cypress: 1.2.3 │
│ Browser: FooBrowser 88 │
│ Specs: 1 found (fail-twice.js) │
│ Searched: cypress/integration/fail-twice.js │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
────────────────────────────────────────────────────────────────────────────────────────────────────
Running: fail-twice.js (1 of 1)
(Attempt 1 of 3) fail twice
(Attempt 2 of 3) fail twice
✓ fail twice
1 passing
(Results)
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
│ Tests: 1 │
│ Passing: 1 │
│ Failing: 0 │
│ Pending: 0 │
│ Skipped: 0 │
│ Screenshots: 2 │
│ Video: true │
│ Duration: X seconds │
│ Spec Ran: fail-twice.js │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
(Screenshots)
- /XXX/XXX/XXX/cypress/screenshots/fail-twice.js/fail twice (failed).png (1280x720)
- /XXX/XXX/XXX/cypress/screenshots/fail-twice.js/fail twice (failed) (attempt 2).p (1280x720)
ng
(Video)
- Started processing: Compressing to 32 CRF
- Finished processing: /XXX/XXX/XXX/cypress/videos/fail-twice.js.mp4 (X second)
====================================================================================================
(Run Finished)
Spec Tests Passing Failing Pending Skipped
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
│ ✔ fail-twice.js XX:XX 1 1 - - - │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
✔ All specs passed! XX:XX 1 1 - - -
`
exports['retries / warns about retries plugin'] = `
We've detected that the incompatible plugin \`cypress-plugin-retries\` is installed at \`node_modules/cypress-plugin-retries\`.
Test retries is now supported in Cypress version \`5.0.0\`.
Remove the plugin from your dependencies to silence this warning.
https://on.cypress.io/test-retries
====================================================================================================
(Run Starting)
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
│ Cypress: 1.2.3 │
│ Browser: FooBrowser 88 │
│ Specs: 1 found (main.spec.js) │
│ Searched: cypress/integration/main.spec.js │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
────────────────────────────────────────────────────────────────────────────────────────────────────
Running: main.spec.js (1 of 1)
✓ foo
1 passing
(Results)
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
│ Tests: 1 │
│ Passing: 1 │
│ Failing: 0 │
│ Pending: 0 │
│ Skipped: 0 │
│ Screenshots: 0 │
│ Video: true │
│ Duration: X seconds │
│ Spec Ran: main.spec.js │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
(Video)
- Started processing: Compressing to 32 CRF
- Finished processing: /XXX/XXX/XXX/cypress/videos/main.spec.js.mp4 (X second)
====================================================================================================
(Run Finished)
Spec Tests Passing Failing Pending Skipped
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
│ ✔ main.spec.js XX:XX 1 1 - - - │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
✔ All specs passed! XX:XX 1 1 - - -
`


@@ -24,9 +24,12 @@ exports['e2e runnable execution / cannot navigate in before hook and test'] = `
✓ test
1) causes domain navigation
navigation error in beforeEach
2) "before each" hook for "never gets here"
2 passing
-1 failing
+2 failing
1) suite
causes domain navigation:
@@ -51,15 +54,40 @@ You may need to restructure some of your test code to avoid this problem.
https://on.cypress.io/cannot-visit-different-origin-domain
[stack trace lines]
2) navigation error in beforeEach
"before each" hook for "never gets here":
CypressError: \`cy.visit()\` failed because you are attempting to visit a URL that is of a different origin.
The new URL is considered a different origin because the following parts of the URL are different:
> port
You may only \`cy.visit()\` same-origin URLs within a single test.
The previous URL you visited was:
> 'http://localhost:4545'
You're attempting to visit this URL:
> 'http://localhost:5656'
You may need to restructure some of your test code to avoid this problem.
https://on.cypress.io/cannot-visit-different-origin-domain
Because this error occurred during a \`before each\` hook we are skipping the remaining tests in the current suite: \`navigation error in beforeEach\`
[stack trace lines]
(Results)
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
-│ Tests: 3 │
+│ Tests: 4 │
│ Passing: 2 │
-│ Failing: 1 │
+│ Failing: 2 │
│ Pending: 0 │
│ Skipped: 0 │
│ Screenshots: 0 │
@@ -83,9 +111,9 @@ https://on.cypress.io/cannot-visit-different-origin-domain
Spec Tests Passing Failing Pending Skipped
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
-│ ✖ beforehook-and-test-navigation.js XX:XX 3 2 1 - - │
+│ ✖ beforehook-and-test-navigation.js XX:XX 4 2 2 - - │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
-✖ 1 of 1 failed (100%) XX:XX 3 2 1 - -
+✖ 1 of 1 failed (100%) XX:XX 4 2 2 - -
`


@@ -29,8 +29,11 @@ exports['e2e screenshots / passes'] = `
✓ accepts screenshot after multiple tries if somehow app has pixels that match helper pixels
✓ can capture element screenshots
✓ retries each screenshot for up to XX:XX
(Attempt 1 of 3) screenshots in a retried test
(Attempt 2 of 3) screenshots in a retried test
2) screenshots in a retried test
✓ ensures unique paths for non-named screenshots
-2) ensures unique paths when there's a non-named screenshot and a failure
+3) ensures unique paths when there's a non-named screenshot and a failure
✓ properly resizes the AUT iframe
- does not take a screenshot for a pending test
✓ adds padding to element screenshot when specified
@@ -41,10 +44,10 @@ exports['e2e screenshots / passes'] = `
✓ can clip fullPage screenshots
✓ can clip element screenshots
before hooks
3) "before all" hook for "empty test 1"
4) "before all" hook for "empty test 1"
each hooks
4) "before each" hook for "empty test 2"
5) "after each" hook for "empty test 2"
5) "before each" hook for "empty test 2"
6) "after each" hook for "empty test 2"
really long test title aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
✓ takes a screenshot
✓ takes another screenshot
@@ -52,7 +55,7 @@ exports['e2e screenshots / passes'] = `
20 passing
1 pending
-5 failing
+6 failing
1) taking screenshots
generates pngs on failure:
@@ -60,11 +63,16 @@ exports['e2e screenshots / passes'] = `
[stack trace lines]
2) taking screenshots
screenshots in a retried test:
Error: fail
[stack trace lines]
3) taking screenshots
ensures unique paths when there's a non-named screenshot and a failure:
Error: failing on purpose
[stack trace lines]
-3) taking screenshots
+4) taking screenshots
before hooks
"before all" hook for "empty test 1":
Error: before hook failing
@@ -72,7 +80,7 @@ exports['e2e screenshots / passes'] = `
Because this error occurred during a \`before all\` hook we are skipping the remaining tests in the current suite: \`before hooks\`
[stack trace lines]
-4) taking screenshots
+5) taking screenshots
each hooks
"before each" hook for "empty test 2":
Error: before each hook failed
@@ -80,7 +88,7 @@ Because this error occurred during a \`before all\` hook we are skipping the rem
Because this error occurred during a \`before each\` hook we are skipping the remaining tests in the current suite: \`each hooks\`
[stack trace lines]
-5) taking screenshots
+6) taking screenshots
each hooks
"after each" hook for "empty test 2":
Error: after each hook failed
@@ -94,12 +102,12 @@ Because this error occurred during a \`after each\` hook we are skipping the rem
(Results)
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
-│ Tests: 25 │
+│ Tests: 26 │
│ Passing: 20 │
-│ Failing: 4 │
+│ Failing: 5 │
│ Pending: 1 │
│ Skipped: 0 │
-│ Screenshots: 28 │
+│ Screenshots: 34 │
│ Video: true │
│ Duration: X seconds │
│ Spec Ran: screenshots_spec.js │
@@ -121,6 +129,17 @@ Because this error occurred during a \`after each\` hook we are skipping the rem
- /XXX/XXX/XXX/cypress/screenshots/screenshots_spec.js/element.png (400x300)
- /XXX/XXX/XXX/cypress/screenshots/screenshots_spec.js/taking screenshots -- retri (200x1300)
es each screenshot for up to XX:XX.png
- /XXX/XXX/XXX/cypress/screenshots/screenshots_spec.js/retrying-test.png (1000x1316)
- /XXX/XXX/XXX/cypress/screenshots/screenshots_spec.js/taking screenshots -- scree (1280x720)
nshots in a retried test (failed).png
- /XXX/XXX/XXX/cypress/screenshots/screenshots_spec.js/retrying-test (attempt 2).p (1000x1316)
ng
- /XXX/XXX/XXX/cypress/screenshots/screenshots_spec.js/taking screenshots -- scree (1280x720)
nshots in a retried test (failed) (attempt 2).png
- /XXX/XXX/XXX/cypress/screenshots/screenshots_spec.js/retrying-test (attempt 3).p (1000x1316)
ng
- /XXX/XXX/XXX/cypress/screenshots/screenshots_spec.js/taking screenshots -- scree (1280x720)
nshots in a retried test (failed) (attempt 3).png
- /XXX/XXX/XXX/cypress/screenshots/screenshots_spec.js/taking screenshots -- ensur (1280x720)
es unique paths for non-named screenshots.png
- /XXX/XXX/XXX/cypress/screenshots/screenshots_spec.js/taking screenshots -- ensur (1280x720)
@@ -167,9 +186,9 @@ Because this error occurred during a \`after each\` hook we are skipping the rem
Spec Tests Passing Failing Pending Skipped
┌────────────────────────────────────────────────────────────────────────────────────────────────┐
-│ ✖ screenshots_spec.js XX:XX 25 20 4 1 - │
+│ ✖ screenshots_spec.js XX:XX 26 20 5 1 - │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
-✖ 1 of 1 failed (100%) XX:XX 25 20 4 1 -
+✖ 1 of 1 failed (100%) XX:XX 26 20 5 1 -
`

File diff suppressed because it is too large


@@ -11,7 +11,7 @@ exports['e2e record passing passes 1'] = `
│ e, record_uncaught_spec.coffee) │
│ Searched: cypress/integration/record* │
│ Params: Tag: false, Group: false, Parallel: false │
-│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
+│ Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12   │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
@@ -247,7 +247,7 @@ We dynamically generated a new test to display this failure.
───────────────────────────────────────────────────────────────────────────────────────────────────────
-Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
+Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`
@@ -348,7 +348,7 @@ exports['e2e record api interaction errors create instance does not update insta
│ Specs: 1 found (record_pass_spec.coffee) │
│ Searched: cypress/integration/record_pass* │
│ Params: Tag: false, Group: false, Parallel: false │
-│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
+│ Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12   │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
Warning: We encountered an error talking to our servers.
@@ -407,7 +407,7 @@ StatusCodeError: 500 - "Internal Server Error"
───────────────────────────────────────────────────────────────────────────────────────────────────────
-Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
+Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`
@@ -424,7 +424,7 @@ exports['e2e record api interaction errors update instance does not update insta
│ Specs: 1 found (record_pass_spec.coffee) │
│ Searched: cypress/integration/record_pass* │
│ Params: Tag: false, Group: false, Parallel: false │
-│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
+│ Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12   │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
@@ -488,7 +488,7 @@ StatusCodeError: 500 - "Internal Server Error"
───────────────────────────────────────────────────────────────────────────────────────────────────────
-Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
+Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`
@@ -505,7 +505,7 @@ exports['e2e record api interaction errors update instance stdout warns but proc
│ Specs: 1 found (record_pass_spec.coffee) │
│ Searched: cypress/integration/record_pass* │
│ Params: Tag: false, Group: false, Parallel: false │
-│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
+│ Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12   │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
@@ -570,7 +570,7 @@ StatusCodeError: 500 - "Internal Server Error"
───────────────────────────────────────────────────────────────────────────────────────────────────────
-Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
+Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`
@@ -626,7 +626,7 @@ exports['e2e record video recording does not upload when not enabled 1'] = `
│ Specs: 1 found (record_pass_spec.coffee) │
│ Searched: cypress/integration/record_pass* │
│ Params: Tag: false, Group: false, Parallel: false │
-│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
+│ Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12   │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
@@ -684,7 +684,7 @@ exports['e2e record video recording does not upload when not enabled 1'] = `
───────────────────────────────────────────────────────────────────────────────────────────────────────
-Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
+Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`
@@ -701,7 +701,7 @@ exports['e2e record api interaction errors uploading assets warns but proceeds 1
│ Specs: 1 found (record_pass_spec.coffee) │
│ Searched: cypress/integration/record_pass* │
│ Params: Tag: false, Group: false, Parallel: false │
-│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
+│ Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12   │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
@@ -766,7 +766,7 @@ exports['e2e record api interaction errors uploading assets warns but proceeds 1
───────────────────────────────────────────────────────────────────────────────────────────────────────
-Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
+Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`
@@ -882,7 +882,7 @@ exports['e2e record parallelization passes in parallel with group 1'] = `
│ e, record_uncaught_spec.coffee) │
│ Searched: cypress/integration/record* │
│ Params: Tag: nightly, Group: prod-e2e, Parallel: true │
-│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
+│ Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12   │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
@@ -940,7 +940,7 @@ exports['e2e record parallelization passes in parallel with group 1'] = `
───────────────────────────────────────────────────────────────────────────────────────────────────────
-Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
+Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`
@@ -958,7 +958,7 @@ exports['e2e record parallelization passes in parallel with group 2'] = `
│ e, record_uncaught_spec.coffee) │
│ Searched: cypress/integration/record* │
│ Params: Tag: nightly, Group: prod-e2e, Parallel: true │
-│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
+│ Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12   │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
@@ -1152,7 +1152,7 @@ We dynamically generated a new test to display this failure.
───────────────────────────────────────────────────────────────────────────────────────────────────────
-Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
+Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`
@@ -1286,7 +1286,7 @@ exports['e2e record api interaction errors create instance 500 does not proceed
│ Specs: 1 found (record_pass_spec.coffee) │
│ Searched: cypress/integration/record_pass* │
│ Params: Tag: nightly, Group: foo, Parallel: true │
-│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
+│ Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12   │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
We encountered an unexpected error talking to our servers.
@@ -1314,7 +1314,7 @@ exports['e2e record api interaction errors update instance 500 does not proceed
│ Specs: 1 found (record_pass_spec.coffee) │
│ Searched: cypress/integration/record_pass* │
│ Params: Tag: nightly, Group: foo, Parallel: true │
-│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
+│ Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12   │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
@@ -1402,7 +1402,7 @@ StatusCodeError: 500 - "Internal Server Error"
│ Specs: 1 found (record_pass_spec.coffee) │
│ Searched: cypress/integration/record_pass* │
│ Params: Tag: nightly, Group: foo, Parallel: true │
│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
│        Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12 │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
We encountered an unexpected error talking to our servers.
@@ -1467,7 +1467,7 @@ StatusCodeError: 500 - "Internal Server Error"
───────────────────────────────────────────────────────────────────────────────────────────────────────
Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`
@@ -1611,7 +1611,7 @@ https://on.cypress.io/dashboard/organizations/org-id-1234/billing
│ Specs: 1 found (record_pass_spec.coffee) │
│ Searched: cypress/integration/record_pass* │
│ Params: Tag: false, Group: false, Parallel: false │
│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
│        Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12 │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
@@ -1669,7 +1669,7 @@ https://on.cypress.io/dashboard/organizations/org-id-1234/billing
───────────────────────────────────────────────────────────────────────────────────────────────────────
Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`
@@ -1691,7 +1691,7 @@ https://on.cypress.io/dashboard/organizations/org-id-1234/billing
│ Specs: 1 found (record_pass_spec.coffee) │
│ Searched: cypress/integration/record_pass* │
│ Params: Tag: false, Group: false, Parallel: false │
│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
│        Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12 │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
@@ -1749,7 +1749,7 @@ https://on.cypress.io/dashboard/organizations/org-id-1234/billing
───────────────────────────────────────────────────────────────────────────────────────────────────────
Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`
@@ -1771,7 +1771,7 @@ https://on.cypress.io/dashboard/organizations/org-id-1234/billing
│ Specs: 1 found (record_pass_spec.coffee) │
│ Searched: cypress/integration/record_pass* │
│ Params: Tag: false, Group: false, Parallel: false │
│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
│        Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12 │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
@@ -1829,7 +1829,7 @@ https://on.cypress.io/dashboard/organizations/org-id-1234/billing
───────────────────────────────────────────────────────────────────────────────────────────────────────
Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`
@@ -1851,7 +1851,7 @@ https://on.cypress.io/dashboard/organizations/org-id-1234/billing
│ Specs: 1 found (record_pass_spec.coffee) │
│ Searched: cypress/integration/record_pass* │
│ Params: Tag: false, Group: false, Parallel: false │
│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
│        Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12 │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
@@ -1909,7 +1909,7 @@ https://on.cypress.io/dashboard/organizations/org-id-1234/billing
───────────────────────────────────────────────────────────────────────────────────────────────────────
Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`
@@ -1931,7 +1931,7 @@ https://on.cypress.io/dashboard/organizations/org-id-1234/billing
│ Specs: 1 found (record_pass_spec.coffee) │
│ Searched: cypress/integration/record_pass* │
│ Params: Tag: false, Group: false, Parallel: false │
│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
│        Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12 │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
@@ -1989,7 +1989,7 @@ https://on.cypress.io/dashboard/organizations/org-id-1234/billing
───────────────────────────────────────────────────────────────────────────────────────────────────────
Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`
@@ -2011,7 +2011,7 @@ https://on.cypress.io/dashboard/organizations/org-id-1234/billing
│ Specs: 1 found (record_pass_spec.coffee) │
│ Searched: cypress/integration/record_pass* │
│ Params: Tag: false, Group: false, Parallel: false │
│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
│        Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12 │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
@@ -2069,7 +2069,7 @@ https://on.cypress.io/dashboard/organizations/org-id-1234/billing
───────────────────────────────────────────────────────────────────────────────────────────────────────
Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`
@@ -2095,7 +2095,7 @@ Details:
│ Specs: 1 found (record_pass_spec.coffee) │
│ Searched: cypress/integration/record_pass* │
│ Params: Tag: false, Group: false, Parallel: false │
│ Run URL: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12 │
│        Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12 │
└────────────────────────────────────────────────────────────────────────────────────────────────┘
@@ -2153,7 +2153,7 @@ Details:
───────────────────────────────────────────────────────────────────────────────────────────────────────
Recorded Run: https://dashboard.cypress.io/#/projects/cjvoj7/runs/12
Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12
`


@@ -224,15 +224,13 @@ exports['e2e reporters mochawesome fails with mochawesome-1.5.2 npm custom repor
pending
- is pending
afterEach hooks
✓ runs this
2) "after each" hook for "runs this"
after hooks
✓ runs this
✓ fails on this
3) "after all" hook for "fails on this"
3 passing
1 passing
1 pending
3 failing
@@ -404,15 +402,13 @@ exports['e2e reporters mochawesome fails with mochawesome-2.3.1 npm custom repor
pending
- is pending
afterEach hooks
✓ runs this
2) "after each" hook for "runs this"
after hooks
✓ runs this
✓ fails on this
3) "after all" hook for "fails on this"
3 passing
1 passing
1 pending
3 failing
@@ -584,15 +580,13 @@ exports['e2e reporters mochawesome fails with mochawesome-3.0.1 npm custom repor
pending
- is pending
afterEach hooks
✓ runs this
2) "after each" hook for "runs this"
after hooks
✓ runs this
✓ fails on this
3) "after all" hook for "fails on this"
3 passing
1 passing
1 pending
3 failing


@@ -27,17 +27,21 @@ exports['lib/reporter #stats has reporterName stats, reporterStats, etc 1'] = {
],
"state": "failed",
"body": "",
"stack": [
1,
2,
3
],
"error": "foo",
"timings": null,
"failedFromHookId": null,
"wallClockStartedAt": null,
"wallClockDuration": null,
"videoTimestamp": null
"displayError": "at foo:1:1\nat bar:1:1\nat baz:1:1",
"attempts": [
{
"state": "failed",
"error": {
"message": "foo",
"stack": "at foo:1:1\nat bar:1:1\nat baz:1:1"
},
"timings": null,
"failedFromHookId": null,
"wallClockStartedAt": null,
"wallClockDuration": null,
"videoTimestamp": null
}
]
},
{
"testId": "r5",
@@ -48,13 +52,18 @@ exports['lib/reporter #stats has reporterName stats, reporterStats, etc 1'] = {
],
"state": "pending",
"body": "",
"stack": null,
"error": null,
"timings": null,
"failedFromHookId": null,
"wallClockStartedAt": null,
"wallClockDuration": null,
"videoTimestamp": null
"displayError": null,
"attempts": [
{
"state": "pending",
"error": null,
"timings": null,
"failedFromHookId": null,
"wallClockStartedAt": null,
"wallClockDuration": null,
"videoTimestamp": null
}
]
}
]
}


@@ -268,7 +268,7 @@ module.exports = {
json: true,
timeout: options.timeout != null ? options.timeout : SIXTY_SECONDS,
headers: {
'x-route-version': '2',
'x-route-version': '3',
},
body: _.pick(options, [
'stats',


@@ -70,6 +70,9 @@ const getPrimaryTab = Bluebird.method((browser) => {
})
const attachToTabMemory = Bluebird.method((tab) => {
// TODO: figure out why tab.memory is sometimes undefined
if (!tab.memory) return
if (tab.memory.isAttached) {
return
}
@@ -186,6 +189,9 @@ export default {
const gc = (tab) => {
return () => {
// TODO: figure out why tab.memory is sometimes undefined
if (!tab.memory) return
let start = Date.now()
return tab.memory.forceGarbageCollection()
@@ -198,6 +204,9 @@ export default {
const cc = (tab) => {
return () => {
// TODO: figure out why tab.memory is sometimes undefined
if (!tab.memory) return
let start = Date.now()
return tab.memory.forceCycleCollection()


@@ -80,7 +80,8 @@ screenshotOnRunFailure
watchForFileChanges
waitForAnimations resolvedNodeVersion
nodeVersion resolvedNodePath
firefoxGcInterval\
firefoxGcInterval
retries
`)
// NOTE: If you add a config value, make sure to update the following
@@ -180,6 +181,7 @@ const CONFIG_DEFAULTS = {
experimentalSourceRewriting: false,
experimentalShadowDomSupport: false,
experimentalFetchPolyfill: false,
retries: { runMode: 0, openMode: 0 },
}
const validationRules = {
@@ -228,6 +230,7 @@ const validationRules = {
experimentalSourceRewriting: v.isBoolean,
experimentalShadowDomSupport: v.isBoolean,
experimentalFetchPolyfill: v.isBoolean,
retries: v.isValidRetriesConfig,
}
const convertRelativeToAbsolutePaths = (projectRoot, obj, defaults = {}) => {
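
For orientation, here is a hedged sketch of the value shapes the new `retries` option is meant to accept, per the default above and the validation rule added later in this diff. The values are illustrative, not from this change:

```js
// Illustrative `retries` values — a bare number applies to both modes,
// the object form configures `cypress run` and `cypress open` separately,
// and null leaves retries disabled for that mode.
const examples = [
  2,                            // up to 2 retries per failing test, both modes
  null,                         // retries disabled
  { runMode: 2, openMode: 0 },  // retry only during `cypress run`
]
```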


@@ -925,9 +925,19 @@ const getMsgByType = function (type, arg1 = {}, arg2, arg3) {
If you don't require screenshots or videos to be stored you can safely ignore this warning.`
case 'EXPERIMENTAL_SAMESITE_REMOVED':
return stripIndent`\
The \`experimentalGetCookiesSameSite\` configuration option was removed in Cypress version 5.0.0. Yielding the \`sameSite\` property is now the default behavior of the \`cy.cookie\` commands.
The \`experimentalGetCookiesSameSite\` configuration option was removed in Cypress version \`5.0.0\`. Yielding the \`sameSite\` property is now the default behavior of the \`cy.cookie\` commands.
You can safely remove this option from your config.`
case 'INCOMPATIBLE_PLUGIN_RETRIES':
return stripIndent`\
We've detected that the incompatible plugin \`cypress-plugin-retries\` is installed at \`${arg1}\`.
Test retries is now supported in Cypress version \`5.0.0\`.
Remove the plugin from your dependencies to silence this warning.
https://on.cypress.io/test-retries
`
default:
}
}


@@ -227,7 +227,6 @@ const updateInstance = (options = {}) => {
error,
video,
hooks,
stdout: null, // don't send stdout with the instance payload to prevent requests that are too large. stdout will later get uploaded separately anyway.
instanceId,
screenshots,
reporterStats,


@@ -916,7 +916,16 @@ module.exports = {
browserOpts.automationMiddleware = {
onAfterResponse: (message, data, resp) => {
if (message === 'take:screenshot' && resp) {
screenshots.push(this.screenshotMetadata(data, resp))
const existingScreenshot = _.findIndex(screenshots, { path: resp.path })
if (existingScreenshot !== -1) {
// NOTE: saving screenshots to the same path will overwrite the previous one
// so we shouldn't report more screenshots than exist on disk.
// this happens when cy.screenshot is used in a retried test
screenshots.splice(existingScreenshot, 1, this.screenshotMetadata(data, resp))
} else {
screenshots.push(this.screenshotMetadata(data, resp))
}
}
return resp
@@ -1112,12 +1121,14 @@ module.exports = {
const { tests, stats } = obj
const attempts = _.flatMap(tests, (test) => test.attempts)
const hasFailingTests = _.get(stats, 'failures') > 0
// if we have a video recording
if (startedVideoCapture && tests && tests.length) {
// always set the video timestamp on tests
obj.tests = Reporter.setVideoTimestamp(startedVideoCapture, tests)
Reporter.setVideoTimestamp(startedVideoCapture, attempts)
}
// we should upload the video if we upload on passes (by default)
@@ -1160,6 +1171,7 @@ module.exports = {
screenshotId: random.id(),
name: data.name || null,
testId: data.testId,
testAttemptIndex: data.testAttemptIndex,
takenAt: resp.takenAt,
path: resp.path,
height: resp.dimensions.height,
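
For clarity, a self-contained sketch of the overwrite-aware bookkeeping above, with hypothetical paths (`lodash` is assumed, as in the surrounding module):

```js
const _ = require('lodash')

// first attempt took a screenshot, then a retried attempt overwrote it on disk
const screenshots = [{ path: '/shots/retry.png', testAttemptIndex: 0 }]
const incoming = { path: '/shots/retry.png', testAttemptIndex: 1 }

const existing = _.findIndex(screenshots, { path: incoming.path })

if (existing !== -1) {
  // same path means the file was overwritten, so replace the stale entry
  screenshots.splice(existing, 1, incoming)
} else {
  screenshots.push(incoming)
}

// screenshots.length === 1 — we report only what actually exists on disk
```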


@@ -2,6 +2,7 @@ const _ = require('lodash')
const cp = require('child_process')
const path = require('path')
const debug = require('debug')('cypress:server:plugins')
const resolve = require('resolve')
const Promise = require('bluebird')
const errors = require('../errors')
const util = require('./util')
@@ -38,6 +39,17 @@ const registerHandler = (handler) => {
const init = (config, options) => {
debug('plugins.init', config.pluginsFile)
// test and warn for incompatible plugin
try {
const retriesPluginPath = path.dirname(resolve.sync('cypress-plugin-retries', {
basedir: options.projectRoot,
}))
options.onWarning(errors.get('INCOMPATIBLE_PLUGIN_RETRIES', path.relative(options.projectRoot, retriesPluginPath)))
} catch (e) {
// noop, incompatible plugin not installed
}
return new Promise((_resolve, _reject) => {
// provide a safety net for fulfilling the promise because the
// 'handleError' function below can potentially be triggered
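
For context, a minimal sketch of how the detection above locates the plugin; `resolve.sync` throws when the package is absent, which the `try/catch` treats as "not installed" (paths here are hypothetical):

```js
const path = require('path')
const resolve = require('resolve')

// resolves the package's main file, e.g.
// '/my/project/node_modules/cypress-plugin-retries/lib/index.js'
const mainFile = resolve.sync('cypress-plugin-retries', { basedir: '/my/project' })

// the warning shows the directory relative to the project root:
path.relative('/my/project', path.dirname(mainFile))
// => 'node_modules/cypress-plugin-retries/lib'
```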


@@ -1,11 +1,13 @@
const _ = require('lodash')
const path = require('path')
const stackUtils = require('./util/stack_utils')
// mocha-* is used to allow us to have later versions of mocha specified in devDependencies
// and prevents accidentally upgrading this one
// TODO: look into upgrading this to version in driver
const Mocha = require('mocha-7.0.1')
const mochaReporters = require('mocha-7.0.1/lib/reporters')
const mochaCreateStatsCollector = require('mocha-7.0.1/lib/stats-collector')
const mochaColor = mochaReporters.Base.color
const debug = require('debug')('cypress:server:reporter')
const Promise = require('bluebird')
@@ -99,6 +101,10 @@ const createRunnable = function (obj, parent) {
runnable.sync = obj.sync
runnable.duration = obj.duration
runnable.state = obj.state != null ? obj.state : 'skipped' // skipped by default
runnable._retries = obj._retries
// shouldn't need to set _currentRetry, but we'll do it anyways
runnable._currentRetry = obj._currentRetry
if (runnable.body == null) {
runnable.body = body
}
@@ -110,10 +116,42 @@ const createRunnable = function (obj, parent) {
return runnable
}
const mochaProps = {
'currentRetry': '_currentRetry',
'retries': '_retries',
}
const toMochaProps = (testProps) => {
return _.each(mochaProps, (val, key) => {
if (testProps.hasOwnProperty(key)) {
testProps[val] = testProps[key]
return delete testProps[key]
}
})
}
const mergeRunnable = (eventName) => {
return (function (testProps, runnables) {
toMochaProps(testProps)
const runnable = runnables[testProps.id]
if (eventName === 'test:before:run') {
if (testProps._currentRetry > runnable._currentRetry) {
debug('test retried:', testProps.title)
const prevAttempts = runnable.prevAttempts || []
delete runnable.prevAttempts
const prevAttempt = _.cloneDeep(runnable)
delete runnable.failedFromHookId
delete runnable.err
delete runnable.hookName
testProps.prevAttempts = prevAttempts.concat([prevAttempt])
}
}
return _.extend(runnable, testProps)
})
}
@@ -172,6 +210,12 @@ const setDate = function (obj, runnables, stats) {
return null
}
const orNull = function (prop) {
if (prop == null) return null
return prop
}
const events = {
'start': setDate,
'end': setDate,
@@ -180,11 +224,13 @@ const events = {
'test': mergeRunnable('test'),
'test end': mergeRunnable('test end'),
'hook': safelyMergeRunnable,
'retry': true,
'hook end': safelyMergeRunnable,
'pass': mergeRunnable('pass'),
'pending': mergeRunnable('pending'),
'fail': mergeErr,
'test:after:run': mergeRunnable('test:after:run'), // our own custom event
'test:before:run': mergeRunnable('test:before:run'), // our own custom event
}
const reporters = {
@@ -201,6 +247,7 @@ class Reporter {
this.reporterName = reporterName
this.projectRoot = projectRoot
this.reporterOptions = reporterOptions
this.normalizeTest = this.normalizeTest.bind(this)
}
setRunnables (rootRunnable) {
@@ -219,6 +266,18 @@ class Reporter {
this.runner = new Mocha.Runner(rootRunnable)
mochaCreateStatsCollector(this.runner)
if (this.reporterName === 'spec') {
this.runner.on('retry', (test) => {
const runnable = this.runnables[test.id]
const padding = ' '.repeat(runnable.titlePath().length)
const retryMessage = mochaColor('medium', `(Attempt ${test.currentRetry + 1} of ${test.retries + 1})`)
// Log: `(Attempt 1 of 2) test title` when a test retries
// eslint-disable-next-line no-console
return console.log(`${padding}${retryMessage} ${test.title}`)
})
}
this.reporter = new this.mocha._reporter(this.runner, {
reporterOptions: this.reporterOptions,
})
@@ -260,7 +319,7 @@ class Reporter {
args = this.parseArgs(event, args)
if (args) {
return (this.runner != null ? this.runner.emit.apply(this.runner, args) : undefined)
return this.runner && this.runner.emit.apply(this.runner, args)
}
}
@@ -292,39 +351,32 @@ class Reporter {
}
normalizeTest (test = {}) {
let wcs
const get = (prop) => {
return _.get(test, prop, null)
}
// use this or null
wcs = get('wallClockStartedAt')
if (wcs) {
// convert to actual date object
wcs = new Date(wcs)
}
// wallClockDuration:
// this is the 'real' duration of wall clock time that the
// user 'felt' when the test run. it includes everything
// from hooks, to the test itself, to lifecycle, and event
// async browser compute time. this number is likely higher
// than summing the durations of the timings.
//
return {
testId: get('id'),
const normalizedTest = {
testId: orNull(test.id),
title: getParentTitle(test),
state: get('state'),
body: get('body'),
stack: get('err.stack'),
error: get('err.message'),
timings: get('timings'),
failedFromHookId: get('failedFromHookId'),
wallClockStartedAt: wcs,
wallClockDuration: get('wallClockDuration'),
videoTimestamp: null, // always start this as null
state: orNull(test.state),
body: orNull(test.body),
displayError: orNull(test.err && test.err.stack),
attempts: _.map([test].concat(test.prevAttempts || []), (attempt) => {
const err = attempt.err && {
name: attempt.err.name,
message: attempt.err.message,
stack: stackUtils.stackWithoutMessage(attempt.err.stack),
}
return {
state: orNull(attempt.state),
error: orNull(err),
timings: orNull(attempt.timings),
failedFromHookId: orNull(attempt.failedFromHookId),
wallClockStartedAt: orNull(attempt.wallClockStartedAt && new Date(attempt.wallClockStartedAt)),
wallClockDuration: orNull(attempt.wallClockDuration),
videoTimestamp: null,
}
}),
}
return normalizedTest
}
end () {
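
Taken together, `normalizeTest` now emits a top-level `displayError` plus one entry per attempt, matching the reporter snapshot earlier in this diff. A hedged sketch of the resulting shape (values hypothetical):

```js
// Hypothetical output of normalizeTest for a test that failed once.
const normalizedTest = {
  testId: 'r4',
  title: ['suite', 'fails on purpose'],
  state: 'failed',
  body: '',
  displayError: 'at foo:1:1\nat bar:1:1\nat baz:1:1',
  attempts: [
    {
      state: 'failed',
      error: {
        name: 'Error',
        message: 'foo',
        stack: 'at foo:1:1\nat bar:1:1\nat baz:1:1',
      },
      timings: null,
      failedFromHookId: null,
      wallClockStartedAt: null,
      wallClockDuration: null,
      videoTimestamp: null,
    },
  ],
}
```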


@@ -340,6 +340,10 @@ const getPath = function (data, ext, screenshotsFolder) {
names[index] = `${names[index]} (failed)`
}
if (data.testAttemptIndex > 0) {
names[index] = `${names[index]} (attempt ${data.testAttemptIndex + 1})`
}
const withoutExt = path.join(screenshotsFolder, ...specNames, ...names)
return ensureUniquePath(withoutExt, ext)
@@ -484,7 +488,7 @@ module.exports = {
const duration = new Date() - new Date(data.startTime)
details = _.extend({}, data, details, { duration })
details = _.pick(details, 'size', 'takenAt', 'dimensions', 'multipart', 'pixelRatio', 'name', 'specName', 'testFailure', 'path', 'scaled', 'blackout', 'duration')
details = _.pick(details, 'testAttemptIndex', 'size', 'takenAt', 'dimensions', 'multipart', 'pixelRatio', 'name', 'specName', 'testFailure', 'path', 'scaled', 'blackout', 'duration')
if (!plugins.has('after:screenshot')) {
return Promise.resolve(details)
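
A hedged sketch of the naming logic above with hypothetical inputs — the attempt suffix only appears on retried attempts, since `testAttemptIndex` is 0 on the first run:

```js
const names = ['my test'] // hypothetical screenshot name
const data = { testFailure: true, testAttemptIndex: 1 }

if (data.testFailure) names[0] += ' (failed)'

if (data.testAttemptIndex > 0) names[0] += ` (attempt ${data.testAttemptIndex + 1})`

// names[0] === 'my test (failed) (attempt 2)'
```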


@@ -0,0 +1,44 @@
import _ from 'lodash'
const stackLineRegex = /^\s*(at )?.*@?\(?.*\:\d+\:\d+\)?$/
// returns tuple of [message, stack]
export const splitStack = (stack: string) => {
const lines = stack.split('\n')
return _.reduce(lines, (memo, line) => {
if (memo.messageEnded || stackLineRegex.test(line)) {
memo.messageEnded = true
memo[1].push(line)
} else {
memo[0].push(line)
}
return memo
}, [[], []] as any[] & {messageEnded: boolean})
}
export const unsplitStack = (messageLines, stackLines) => {
return _.castArray(messageLines).concat(stackLines).join('\n')
}
export const getStackLines = (stack) => {
const [, stackLines] = splitStack(stack)
return stackLines
}
export const stackWithoutMessage = (stack) => {
return getStackLines(stack).join('\n')
}
export const replacedStack = (err, newStack) => {
// if err already lacks a stack or we've removed the stack
// for some reason, keep it stackless
if (!err.stack) return err.stack
const errString = err.toString()
const stackLines = getStackLines(newStack)
return unsplitStack(errString, stackLines)
}
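
A hedged usage sketch of the stack helpers above (the require path and the stack text are made up):

```js
const { stackWithoutMessage, replacedStack } = require('./util/stack_utils')

const stack = [
  'Error: fail',
  'some detail line',
  '    at foo (cypress/integration/spec.js:5:11)',
  '    at bar (spec.js:6:2)',
].join('\n')

// strips the message lines, keeping only the frame lines:
stackWithoutMessage(stack)
// => '    at foo (cypress/integration/spec.js:5:11)\n    at bar (spec.js:6:2)'

// keeps the error's own message but swaps in the new frames:
replacedStack(new Error('other'), stack)
// => 'Error: other\n    at foo (cypress/integration/spec.js:5:11)\n    at bar (spec.js:6:2)'
```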


@@ -103,6 +103,21 @@ const isValidBrowserList = (key, browsers) => {
return true
}
const isValidRetriesConfig = (key, value) => {
const isNullOrNumber = (val) => _.isNull(val) || _.isNumber(val)
if (
isNullOrNumber(value)
|| (_.isEqual(_.keys(value), ['runMode', 'openMode'])
&& isNullOrNumber(value.runMode)
&& isNullOrNumber(value.openMode))
) {
return true
}
return errMsg(key, value, 'a number or null or an object with keys "openMode" and "runMode" with values of numbers or nulls')
}
const isValidFirefoxGcInterval = (key, value) => {
const isIntervalValue = (val) => {
if (isNumber(val)) {
@@ -122,6 +137,24 @@ const isValidFirefoxGcInterval = (key, value) => {
return errMsg(key, value, 'a positive number or null or an object with "openMode" and "runMode" as keys and positive numbers or nulls as values')
}
const isOneOf = (...values) => {
return (key, value) => {
if (values.some((v) => {
if (typeof v === 'function') {
return v(value)
}
return v === value
})) {
return true
}
const strings = values.map(str).join(', ')
return errMsg(key, value, `one of these values: ${strings}`)
}
}
module.exports = {
isValidBrowser,
@@ -129,6 +162,8 @@ module.exports = {
isValidFirefoxGcInterval,
isValidRetriesConfig,
isNumber (key, value) {
if (value == null || isNumber(value)) {
return true
@@ -214,17 +249,5 @@ module.exports = {
validate("example", "else") // error message string
```
*/
isOneOf (...values) {
return (key, value) => {
if (values.some((v) => {
return v === value
})) {
return true
}
const strings = values.map(str).join(', ')
return errMsg(key, value, `one of these values: ${strings}`)
}
},
isOneOf,
}
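
A hedged usage sketch of the validator above (the module path is assumed for illustration):

```js
const v = require('./validation')

v.isValidRetriesConfig('retries', 2)                              // true
v.isValidRetriesConfig('retries', null)                           // true
v.isValidRetriesConfig('retries', { runMode: 1, openMode: null }) // true

// the object form requires exactly the runMode and openMode keys:
v.isValidRetriesConfig('retries', { runMode: 1 })                 // error message string
v.isValidRetriesConfig('retries', '2')                            // error message string
```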


@@ -124,7 +124,7 @@
"@babel/core": "7.9.0",
"@babel/preset-env": "7.9.0",
"@cypress/debugging-proxy": "2.0.1",
"@cypress/json-schemas": "5.34.2",
"@cypress/json-schemas": "5.35.0",
"@cypress/sinon-chai": "1.1.0",
"@packages/desktop-gui": "*",
"@packages/electron": "*",


@@ -8,35 +8,27 @@ describe('e2e caught and uncaught hooks errors', () => {
},
})
it('failing1', function () {
return e2e.exec(this, {
spec: 'hook_caught_error_failing_spec.coffee',
snapshot: true,
expectedExitCode: 3,
})
e2e.it('failing1', {
spec: 'hook_caught_error_failing_spec.coffee',
snapshot: true,
expectedExitCode: 3,
})
it('failing2', function () {
return e2e.exec(this, {
spec: 'hook_uncaught_error_failing_spec.coffee',
snapshot: true,
expectedExitCode: 1,
})
e2e.it('failing2', {
spec: 'hook_uncaught_error_failing_spec.coffee',
snapshot: true,
expectedExitCode: 1,
})
it('failing3', function () {
return e2e.exec(this, {
spec: 'hook_uncaught_root_error_failing_spec.coffee',
snapshot: true,
expectedExitCode: 1,
})
e2e.it('failing3', {
spec: 'hook_uncaught_root_error_failing_spec.coffee',
snapshot: true,
expectedExitCode: 1,
})
it('failing4', function () {
return e2e.exec(this, {
spec: 'hook_uncaught_error_events_failing_spec.coffee',
snapshot: true,
expectedExitCode: 1,
})
e2e.it('failing4', {
spec: 'hook_uncaught_error_events_failing_spec.coffee',
snapshot: true,
expectedExitCode: 1,
})
})


@@ -150,6 +150,15 @@ describe('e2e plugins', function () {
})
})
// https://github.com/cypress-io/cypress/issues/8079
it('does not report more screenshots than exist if user overwrites previous screenshot in afterScreenshot', function () {
return e2e.exec(this, {
spec: 'after_screenshot_overwrite_spec.coffee',
project: pluginAfterScreenshot,
snapshot: true,
})
})
it('fails when invalid event is registered', function () {
return e2e.exec(this, {
spec: 'app_spec.js',


@@ -0,0 +1,21 @@
import e2e from '../support/helpers/e2e'
import Fixtures from '../support/helpers/fixtures'
const it = e2e.it
describe('retries', () => {
e2e.setup()
it('supports retries', {
project: Fixtures.projectPath('retries-2'),
spec: 'fail-twice.js',
snapshot: true,
})
it('warns about retries plugin', {
project: Fixtures.projectPath('plugin-retries'),
spec: 'main.spec.js',
stubPackage: 'cypress-plugin-retries',
snapshot: true,
})
})


@@ -24,7 +24,7 @@ describe('e2e runnable execution', () => {
project: Fixtures.projectPath('hooks-after-rerun'),
spec: 'beforehook-and-test-navigation.js',
snapshot: true,
expectedExitCode: 1,
expectedExitCode: 2,
})
e2e.it('runnables run correct number of times with navigation', {


@@ -63,7 +63,7 @@ describe('e2e screenshots', () => {
// the test title as the file name
e2e.it('passes', {
spec: 'screenshots_spec.js',
expectedExitCode: 4,
expectedExitCode: 5,
snapshot: true,
timeout: 180000,
onStdout: e2e.normalizeWebpackErrors,


@@ -90,9 +90,12 @@ const expectRunsToHaveCorrectStats = (runs = []) => {
expectStartToBeBeforeEnd(run, 'stats.wallClockStartedAt', 'stats.wallClockEndedAt')
expectStartToBeBeforeEnd(run, 'reporterStats.start', 'reporterStats.end')
// grab all the wallclock durations for all tests
// grab all the wallclock durations for all tests (and retried attempts)
// because our duration should be at least this
const wallClocks = _.sumBy(run.tests, 'wallClockDuration')
const attempts = _.flatMap(run.tests, (test) => test.attempts)
const wallClocks = _.sumBy(attempts, 'wallClockDuration')
// ensure each run's duration is around the sum
// of all tests wallclock duration
@@ -100,7 +103,7 @@ const expectRunsToHaveCorrectStats = (runs = []) => {
run,
'stats.wallClockDuration',
wallClocks,
wallClocks + 200, // add 200ms to account for padding
wallClocks + 400, // add 400ms to account for padding
1234,
)
@@ -108,7 +111,7 @@ const expectRunsToHaveCorrectStats = (runs = []) => {
run,
'reporterStats.duration',
wallClocks,
wallClocks + 200, // add 200ms to account for padding
wallClocks + 400, // add 400ms to account for padding
1234,
)
@@ -118,11 +121,17 @@ const expectRunsToHaveCorrectStats = (runs = []) => {
run.spec.absolute = e2e.normalizeStdout(run.spec.absolute)
_.each(run.tests, (test) => {
if (test.displayError) {
test.displayError = e2e.normalizeStdout(test.displayError)
}
})
// now make sure that each tests wallclock duration
// is around the sum of all of its timings
run.tests.forEach((test) => {
attempts.forEach((attempt) => {
// cannot sum an object, must use array of values
const timings = _.sumBy(_.values(test.timings), (val) => {
const timings = _.sumBy(_.values(attempt.timings), (val) => {
if (_.isArray(val)) {
// array for hooks
return _.sumBy(val, addFnAndAfterFn)
@@ -137,7 +146,7 @@ const expectRunsToHaveCorrectStats = (runs = []) => {
})
expectDurationWithin(
test,
attempt,
'wallClockDuration',
timings,
timings + 80, // add 80ms to account for padding
@@ -145,21 +154,21 @@ const expectRunsToHaveCorrectStats = (runs = []) => {
)
// now reset all the test timings
normalizeTestTimings(test, 'timings')
normalizeTestTimings(attempt, 'timings')
// normalize stack
if (test.stack) {
test.stack = e2e.normalizeStdout(test.stack)
if (attempt.error) {
attempt.error.stack = e2e.normalizeStdout(attempt.error.stack).trim()
}
if (test.wallClockStartedAt) {
const d = new Date(test.wallClockStartedAt)
if (attempt.wallClockStartedAt) {
const d = new Date(attempt.wallClockStartedAt)
expect(d.toJSON()).to.eq(test.wallClockStartedAt)
test.wallClockStartedAt = STATIC_DATE
expect(d.toJSON()).to.eq(attempt.wallClockStartedAt)
attempt.wallClockStartedAt = STATIC_DATE
expect(test.videoTimestamp).to.be.a('number')
test.videoTimestamp = 9999
expect(attempt.videoTimestamp).to.be.a('number')
attempt.videoTimestamp = 9999
}
})
@@ -252,4 +261,46 @@ describe('e2e spec_isolation', () => {
})
},
})
e2e.it('failing with retries enabled', {
spec: 'simple_failing_hook_spec.coffee',
outputPath,
snapshot: true,
expectedExitCode: 3,
config: {
retries: 1,
},
async onRun (execFn) {
await execFn()
const json = await fs.readJsonAsync(outputPath)
expect(json.config).to.be.an('object')
expect(json.config.projectName).to.eq('e2e')
expect(json.config.projectRoot).to.eq(e2ePath)
json.config = {}
expect(json.browserPath).to.be.a('string')
expect(json.browserName).to.be.a('string')
expect(json.browserVersion).to.be.a('string')
expect(json.osName).to.be.a('string')
expect(json.osVersion).to.be.a('string')
expect(json.cypressVersion).to.be.a('string')
_.extend(json, {
browserPath: 'path/to/browser',
browserName: 'FooBrowser',
browserVersion: '88',
osName: 'FooOS',
osVersion: '1234',
cypressVersion: '9.9.9',
})
expect(json.totalTests).to.eq(_.sum([json.totalFailed, json.totalPassed, json.totalPending, json.totalSkipped]))
expectStartToBeBeforeEnd(json, 'startedTestsAt', 'endedTestsAt')
expectDurationWithin(json, 'totalDuration', _.sumBy(json.runs, 'stats.wallClockDuration'), _.sumBy(json.runs, 'stats.wallClockDuration'), 5555)
expect(json.runs).to.have.length(1)
expectRunsToHaveCorrectStats(json.runs)
snapshot('failing with retries enabled', json)
},
})
})


@@ -192,7 +192,7 @@ const defaultRoutes = [
}, {
method: 'put',
url: '/instances/:id',
req: 'putInstanceRequest@2.0.0',
req: 'putInstanceRequest@3.0.0',
resSchema: 'putInstanceResponse@2.0.0',
res: sendUploadUrls,
}, {
@@ -239,6 +239,7 @@ describe('e2e record', () => {
})
.get('stdout')
.then((stdout) => {
console.log(stdout)
expect(stdout).to.include('Run URL:')
expect(stdout).to.include(runUrl)
@@ -260,6 +261,7 @@ describe('e2e record', () => {
// grab the second set of 5
const secondInstanceSet = urls.slice(5, 10)
console.log(secondInstanceSet)
expect(secondInstanceSet).to.have.members([
`POST /runs/${runId}/instances`,
`PUT /instances/${instanceId}`,
@@ -338,7 +340,7 @@ describe('e2e record', () => {
expect(secondInstancePut.body.error).to.be.null
expect(secondInstancePut.body.tests).to.have.length(2)
expect(secondInstancePut.body.hooks).to.have.length(2)
expect(secondInstancePut.body.hooks).to.have.length(1)
expect(secondInstancePut.body.screenshots).to.have.length(1)
expect(secondInstancePut.body.stats.tests).to.eq(2)
expect(secondInstancePut.body.stats.failures).to.eq(1)
@@ -362,7 +364,7 @@ describe('e2e record', () => {
expect(thirdInstancePut.body.error).to.be.null
expect(thirdInstancePut.body.tests).to.have.length(2)
expect(thirdInstancePut.body.hooks).to.have.length(1)
expect(thirdInstancePut.body.hooks).to.have.length(0)
expect(thirdInstancePut.body.screenshots).to.have.length(1)
expect(thirdInstancePut.body.stats.tests).to.eq(2)
expect(thirdInstancePut.body.stats.passes).to.eq(1)
@@ -387,7 +389,7 @@ describe('e2e record', () => {
expect(fourthInstancePut.body.error).to.be.null
expect(fourthInstancePut.body.tests).to.have.length(1)
expect(fourthInstancePut.body.hooks).to.have.length(1)
expect(fourthInstancePut.body.hooks).to.have.length(0)
expect(fourthInstancePut.body.screenshots).to.have.length(1)
expect(fourthInstancePut.body.stats.tests).to.eq(1)
expect(fourthInstancePut.body.stats.failures).to.eq(1)
@@ -869,7 +871,7 @@ describe('e2e record', () => {
routes[2] = {
method: 'put',
url: '/instances/:id',
req: 'putInstanceRequest@2.0.0',
req: 'putInstanceRequest@3.0.0',
res (req, res) {
return res.sendStatus(500)
},
@@ -1169,7 +1171,7 @@ describe('e2e record', () => {
}, {
method: 'put',
url: '/instances/:id',
req: 'putInstanceRequest@2.0.0',
req: 'putInstanceRequest@3.0.0',
res (req, res) {
return res.sendStatus(500)
},
@@ -1216,7 +1218,7 @@ describe('e2e record', () => {
}, {
method: 'put',
url: '/instances/:id',
req: 'putInstanceRequest@2.0.0',
req: 'putInstanceRequest@3.0.0',
resSchema: 'putInstanceResponse@2.0.0',
res: sendUploadUrls,
}, {
@@ -1287,7 +1289,7 @@ describe('e2e record', () => {
}, {
method: 'put',
url: '/instances/:id',
req: 'putInstanceRequest@2.0.0',
req: 'putInstanceRequest@3.0.0',
resSchema: 'putInstanceResponse@2.0.0',
res: sendUploadUrls,
}, {


@@ -117,7 +117,7 @@ describe('e2e reporters', () => {
.then((xml) => {
expect(xml).to.include('<h3 class="suite-title">simple failing hook spec</h3>')
expect(xml).to.include('<div class="status-item status-item-hooks danger">3 Failed Hooks</div>')
expect(xml).to.not.include('.status-item-hooks')
})
}
@@ -125,10 +125,12 @@ describe('e2e reporters', () => {
.then((json) => {
// mochawesome does not consider hooks to be
// 'failures' but it does collect them in 'other'
// HOWEVER we now change how mocha events fire to make mocha stats reflect ours
expect(json.stats).to.be.an('object')
expect(json.stats.failures).to.eq(0)
expect(json.stats.other).to.eq(3)
expect(json.stats.passes).to.eq(1)
expect(json.stats.failures).to.eq(3)
expect(json.stats.skipped).to.eq(1)
expect(json.stats.other).to.eq(0)
})
})
})


@@ -1 +1,3 @@
{}
{
"retries": null
}


@@ -163,6 +163,13 @@ describe('taking screenshots', () => {
})
})
it('screenshots in a retried test', { retries: 2 }, () => {
cy.screenshot('retrying-test')
.then(() => {
throw new Error('fail')
})
})
it('ensures unique paths for non-named screenshots', () => {
cy.screenshot({ capture: 'runner' })
cy.screenshot({ capture: 'runner' })


@@ -6,6 +6,7 @@ const http = require('http')
const Jimp = require('jimp')
const path = require('path')
const Promise = require('bluebird')
const { useFixedFirefoxResolution } = require('../../../utils')
module.exports = (on, config) => {
let performance = {
@@ -45,13 +46,7 @@ module.exports = (on, config) => {
})
on('before:browser:launch', (browser, options) => {
if (browser.family === 'firefox' && !config.env['NO_RESIZE']) {
// this is needed to ensure correct error screenshot / video recording
// resolution of exactly 1280x720 (height must account for firefox url bar)
options.args = options.args.concat(
['-width', '1280', '-height', '794'],
)
}
useFixedFirefoxResolution(browser, options, config)
if (browser.family === 'firefox' && process.env.FIREFOX_FORCE_STRICT_SAMESITE) {
// @see https://www.jardinesoftware.net/2019/10/28/samesite-by-default-in-2020/


@@ -1,5 +1,8 @@
before(function () {
if (Cypress.browser.family === 'chromium' && Cypress.browser.name !== 'electron') {
import _ from 'lodash'
// we don't use a `before` here since that would show up in run results and cause confusion during test debugging
const before = _.once(function () {
if (Cypress.isBrowser([{ name: '!electron', family: 'chromium' }])) {
return Cypress.automation('remote:debugger:protocol', {
command: 'Emulation.setDeviceMetricsOverride',
params: {
@@ -19,3 +22,5 @@ before(function () {
})
}
})
Cypress.on('test:before:run:async', before)


@@ -85,7 +85,7 @@ export const verify = (ctx, options) => {
// code frames will show `fail(this,()=>` as the 1st line
cy.get('.test-err-code-frame pre span').should('include.text', 'fail(this,()=>')
cy.contains('.test-err-code-frame .runnable-err-file-path', openInIdePath.relative)
cy.contains('.test-err-code-frame .runnable-err-file-path span', openInIdePath.relative)
.click()
.should(() => {
expect(runnerWs.emit.withArgs('open:file')).to.be.calledTwice


@@ -23,3 +23,15 @@ describe('suite', () => {
cy.visit(urls[2])
})
})
describe('navigation error in beforeEach', () => {
before(() => {
cy.visit(urls[1])
})
beforeEach(() => {
cy.visit(urls[2])
})
it('never gets here', () => {})
})


@@ -0,0 +1,8 @@
Cypress._.times 3, () =>
  it "cy.screenshot() - replacement", ->
    cy.screenshot("replace-me", {
      capture: "runner",
      onAfterScreenshot: (details) ->
        expect(details.path).to.include("screenshot-replacement.png")
        expect(details.size).to.equal(1047)
        expect(details.dimensions).to.eql({ width: 1, height: 1 })
    })
