// helpers.js
import { textSummary } from 'https://jslib.k6.io/k6-summary/0.0.1/index.js';

// Shared setup(): runs once per test run; whatever it returns is passed
// to every scenario function and to teardown().
export function setup() {
  console.log('helpers.js setup()');
  return { foo: 'bar' };
}

// Shared teardown(): receives the data returned by setup().
export function teardown(data) {
  console.log(`helpers.js teardown(${JSON.stringify(data)})`);
}

// Shared handleSummary(): runs once at the end of the whole test run and
// decides where the summary goes (stdout plus a raw JSON file here).
export function handleSummary(data) {
  console.log('helpers.js handleSummary()');
  return {
    'stdout': textSummary(data, { indent: ' ', enableColors: true }),
    'raw-summary-data.json': JSON.stringify(data, null, 4),
  };
}
// Main script: re-exports each scenario's function plus the shared
// lifecycle hooks from helpers.js, and maps scenarios to them via `exec`.
export { default as func01 } from './script1.js';
export { default as func02 } from './script2.js';
export { setup, teardown, handleSummary } from './helpers.js';

export let options = {
  scenarios: {
    'first_scenario': {
      executor: 'constant-vus',
      vus: 2,
      duration: '10s',
      exec: 'func01', // script1's default function
    },
    'second_scenario': {
      executor: 'constant-vus',
      vus: 3,
      duration: '15s',
      exec: 'func02', // script2's default function
    },
  },
};
// script1.js: also runnable standalone (`k6 run script1.js`), since it
// re-exports the shared lifecycle hooks itself.
import { sleep } from "k6";
export { setup, teardown, handleSummary } from './helpers.js';

export default function () {
  console.log('script1 main');
  sleep(1);
}
// script2.js: same structure as script1.js.
import { sleep } from "k6";
export { setup, teardown, handleSummary } from './helpers.js';

export default function () {
  console.log('script2 main');
  sleep(1);
}
The data that handleSummary receives is global for the whole test run, not specific to a single scenario. If you want reports per scenario, the easiest way to get them is to run the scripts separately, e.g. create a bash script that calls k6 for every script:
k6 run script1.js
k6 run script2.js
# etc... and you can use a loop over the script files, of course:
for f in script*.js; do k6 run "$f"; done
Alternatively, when grafana/k6#1321 is done, or with the workaround from grafana/k6-docs#205, it might be easy to create sub-metrics based on the scenario tag, to differentiate between the metrics of the different scenarios. But you'd still need to sift through them in the single handleSummary function for the whole test run.
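
A rough sketch of that approach, assuming the thresholds-based workaround: declaring a threshold on a tag-filtered sub-metric makes it show up as its own entry in the data passed to handleSummary, so the single callback can sift entries by scenario name. The threshold values and output file name below are placeholders, not part of the original example.

// Sketch only: per-scenario sub-metrics via tag-filtered thresholds.
export { default as func01 } from './script1.js';
export { default as func02 } from './script2.js';
export { setup, teardown } from './helpers.js';

export let options = {
  scenarios: {
    'first_scenario': { executor: 'constant-vus', vus: 2, duration: '10s', exec: 'func01' },
    'second_scenario': { executor: 'constant-vus', vus: 3, duration: '15s', exec: 'func02' },
  },
  thresholds: {
    // Placeholder values; the point is the {scenario:...} selector, which
    // surfaces a per-scenario sub-metric in the handleSummary() data.
    'iteration_duration{scenario:first_scenario}': ['p(95)<2000'],
    'iteration_duration{scenario:second_scenario}': ['p(95)<2000'],
  },
};

export function handleSummary(data) {
  // Still one callback for the whole run: sift data.metrics by the
  // scenario tag embedded in each sub-metric's name.
  const firstScenario = {};
  for (const name of Object.keys(data.metrics)) {
    if (name.indexOf('{scenario:first_scenario}') !== -1) {
      firstScenario[name] = data.metrics[name];
    }
  }
  return { 'first-scenario-summary.json': JSON.stringify(firstScenario, null, 4) };
}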
That's actually what I'm doing right now (running a bash script, then printing all the JSONs, then converting to CSV)... thank you very much!
(I just thought maybe I could save some code... but eventually, handleSummary got me.)
Hey @na--, thanks for the great example.
Last question: how can we tie each handleSummary to its own test? Let me explain: this is what I'm doing in each of my test suites. Each test has its own handleSummary, and there is one shared handleSummaryHelper; this way, I can separate the output file names using the helper. I know that the data in the handleSummary callback does not contain any information about the test name whatsoever.
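
For illustration, a minimal sketch of that pattern: the name handleSummaryHelper comes from the comment above, but its signature and file layout are assumptions, since the original snippets aren't shown. Because data carries no test name, each test's own handleSummary passes its name in explicitly:

// handleSummaryHelper.js (hypothetical reconstruction)
import { textSummary } from 'https://jslib.k6.io/k6-summary/0.0.1/index.js';

export function handleSummaryHelper(testName, data) {
  return {
    'stdout': textSummary(data, { indent: ' ', enableColors: true }),
    // The test name only exists here because the caller passed it in.
    [`${testName}-summary.json`]: JSON.stringify(data, null, 4),
  };
}

// some-test.js (hypothetical): every test keeps its own one-line handleSummary.
import { handleSummaryHelper } from './handleSummaryHelper.js';

export function handleSummary(data) {
  return handleSummaryHelper('some-test', data);
}

This keeps the formatting logic in one place while still giving each test a uniquely named summary file.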