diff --git a/.vale.ini b/.vale.ini new file mode 100644 index 0000000000..e7e35ca2c9 --- /dev/null +++ b/.vale.ini @@ -0,0 +1,6 @@ +StylesPath = .vale + +Vocab = docs + +[*.md] +BasedOnStyles = Vale, write-good \ No newline at end of file diff --git a/.vale/vocab/docs/accept.txt b/.vale/vocab/docs/accept.txt new file mode 100644 index 0000000000..0f4ccf0058 --- /dev/null +++ b/.vale/vocab/docs/accept.txt @@ -0,0 +1,120 @@ +(?i)aws +(?i)boolean +(?i)browserify +(?i)config +(?i)css +(?i)goja +(?i)grafana +(?i)hmac +(?i)html +(?i)http +(?i)https +(?i)json +(?i)param +(?i)params +(?i)statsd +(?i)tcp +(?i)teardown +(?i)tls +(?i)url +(?i)uuid +(?i)vm +(?i)vu +(?i)websocket +(?i)ws +(?i)www +(JS|js)lib +[aA]shburn +[dD]atadog +[eE]nv +[iI]nflux[dD][bB] +[jJ]ava[sS]cript +[nN]amespace +[pP]ipelining +[wW]ebpack +alice +async +browserified +browserlist +bundler +Capitan +corejs +cron +crypto +Cyber +datastore +datetime +dcadwallader +deserialized +Dev +errno +erroring +falsy +flatlined +fn +gravatar +gzip +Hasher +hostname +htop +iftop +impactful +ints +io +j[qQ]uery +jq +loki +Lua +maxfiles +msgs +netstat +nofile +npm +offboarding +Okta +onboarding +Ops +Palo +Paulo +performant +polyfill +polyfills +proto +rawstd +reqs +resampled +rollouts +rollup +Scorigami +scriptable +smockvavelsky +Solaris +Souders +src +stderr +stdout +Stian +subprotocol +sudo +swapon +sysctl +taggable +tbody +tfoot +thead +tm +transpile +transpiled +truthy +tw +ulimit +ulimit +Un +Un\*x +unencoded +unencrypted +untyped +vendored +walkthrough +webpages +wpnonce \ No newline at end of file diff --git a/.vale/write-good/passive.yml_old b/.vale/write-good/passive.yml_old new file mode 100644 index 0000000000..f472cb9049 --- /dev/null +++ b/.vale/write-good/passive.yml_old @@ -0,0 +1,183 @@ +extends: existence +message: "'%s' may be passive voice. Use active voice if you can." 
+ignorecase: true +level: warning +raw: + - \b(am|are|were|being|is|been|was|be)\b\s* +tokens: + - '[\w]+ed' + - awoken + - beat + - become + - been + - begun + - bent + - beset + - bet + - bid + - bidden + - bitten + - bled + - blown + - born + - bought + - bound + - bred + - broadcast + - broken + - brought + - built + - burnt + - burst + - cast + - caught + - chosen + - clung + - come + - cost + - crept + - cut + - dealt + - dived + - done + - drawn + - dreamt + - driven + - drunk + - dug + - eaten + - fallen + - fed + - felt + - fit + - fled + - flown + - flung + - forbidden + - foregone + - forgiven + - forgotten + - forsaken + - fought + - found + - frozen + - given + - gone + - gotten + - ground + - grown + - heard + - held + - hidden + - hit + - hung + - hurt + - kept + - knelt + - knit + - known + - laid + - lain + - leapt + - learnt + - led + - left + - lent + - let + - lighted + - lost + - made + - meant + - met + - misspelt + - mistaken + - mown + - overcome + - overdone + - overtaken + - overthrown + - paid + - pled + - proven + - put + - quit + - read + - rid + - ridden + - risen + - run + - rung + - said + - sat + - sawn + - seen + - sent + - set + - sewn + - shaken + - shaven + - shed + - shod + - shone + - shorn + - shot + - shown + - shrunk + - shut + - slain + - slept + - slid + - slit + - slung + - smitten + - sold + - sought + - sown + - sped + - spent + - spilt + - spit + - split + - spoken + - spread + - sprung + - spun + - stolen + - stood + - stridden + - striven + - struck + - strung + - stuck + - stung + - stunk + - sung + - sunk + - swept + - swollen + - sworn + - swum + - swung + - taken + - taught + - thought + - thrived + - thrown + - thrust + - told + - torn + - trodden + - understood + - upheld + - upset + - wed + - wept + - withheld + - withstood + - woken + - won + - worn + - wound + - woven + - written + - wrung diff --git a/.vale/write-good/there-is.yml b/.vale/write-good/there-is.yml new file mode 100644 index 0000000000..8b82e8f6cc --- /dev/null +++ b/.vale/write-good/there-is.yml @@ -0,0 +1,6 @@ +extends: existence +message: "Don't start a sentence with '%s'." +ignorecase: false +level: error +raw: + - '(?:[;-]\s)There\s(is|are)|\bThere\s(is|are)\b' diff --git a/.vale/write-good/weasel.yml_old b/.vale/write-good/weasel.yml_old new file mode 100644 index 0000000000..e29391444b --- /dev/null +++ b/.vale/write-good/weasel.yml_old @@ -0,0 +1,207 @@ +extends: existence +message: "'%s' is a weasel word!" 
+ignorecase: true +level: warning +tokens: + - absolutely + - accidentally + - additionally + - allegedly + - alternatively + - angrily + - anxiously + - approximately + - awkwardly + - badly + - barely + - beautifully + - blindly + - boldly + - bravely + - brightly + - briskly + - bristly + - bubbly + - busily + - calmly + - carefully + - carelessly + - cautiously + - cheerfully + - clearly + - closely + - coldly + - completely + - consequently + - correctly + - courageously + - crinkly + - cruelly + - crumbly + - cuddly + - currently + - daily + - daringly + - deadly + - definitely + - deliberately + - doubtfully + - dumbly + - eagerly + - early + - easily + - elegantly + - enormously + - enthusiastically + - equally + - especially + - eventually + - exactly + - exceedingly + - exclusively + - extremely + - fairly + - faithfully + - fatally + - fiercely + - finally + - fondly + - few + - foolishly + - fortunately + - frankly + - frantically + - generously + - gently + - giggly + - gladly + - gracefully + - greedily + - happily + - hardly + - hastily + - healthily + - heartily + - helpfully + - honestly + - hourly + - hungrily + - hurriedly + - immediately + - impatiently + - inadequately + - ingeniously + - innocently + - inquisitively + - interestingly + - irritably + - jiggly + - joyously + - justly + - kindly + - largely + - lately + - lazily + - likely + - literally + - lonely + - loosely + - loudly + - loudly + - luckily + - madly + - many + - mentally + - mildly + - monthly + - mortally + - mostly + - mysteriously + - neatly + - nervously + - nightly + - noisily + - normally + - obediently + - occasionally + - only + - openly + - painfully + - particularly + - patiently + - perfectly + - politely + - poorly + - powerfully + - presumably + - previously + - promptly + - punctually + - quarterly + - quickly + - quietly + - rapidly + - rarely + - really + - recently + - recklessly + - regularly + - remarkably + - relatively + - reluctantly + - repeatedly + - rightfully + - roughly + - rudely + - sadly + - safely + - selfishly + - sensibly + - seriously + - sharply + - shortly + - shyly + - significantly + - silently + - simply + - sleepily + - slowly + - smartly + - smelly + - smoothly + - softly + - solemnly + - sparkly + - speedily + - stealthily + - sternly + - stupidly + - substantially + - successfully + - suddenly + - surprisingly + - suspiciously + - swiftly + - tenderly + - tensely + - thoughtfully + - tightly + - timely + - truthfully + - unexpectedly + - unfortunately + - usually + - very + - victoriously + - violently + - vivaciously + - warmly + - waverly + - weakly + - wearily + - weekly + - wildly + - wisely + - worldly + - wrinkly + - yearly diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e6e3370451..3226116539 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,9 +1,8 @@ -Contributing to the k6 documentation -==================================== +# Contributing to the k6 documentation Thank you for your interest in contributing to k6! -(ノ◕ヮ◕)ノ*:・゚✧ +(ノ ◕ ヮ ◕)ノ\*:・゚ ✧ Before you begin, make sure to familiarize yourself with the [Code of Conduct](CODE_OF_CONDUCT.md). If you've previously contributed to other open source project, you may recognize it as the classic [Contributor Covenant](https://contributor-covenant.org/). @@ -15,8 +14,8 @@ The k6 documentation is a Gatsby application using React components and markdown There are two types of pages: Welcome Pages and Documentation articles. 
-[Welcome Pages](src/templates/docs) are the pages shown on the header menu: `Guides`, `Javascript API`, `Cloud Docs`, `Integration`, and `Examples`. They are made as separate React Components for maximum customisation. +[Welcome Pages](src/templates/docs) are the pages shown on the header menu: `Guides`, `JavaScript API`, `Cloud Docs`, `Integration`, and `Examples`. They are made as separate React Components for maximum customisation. Documentation articles are markdown files structured under the [`src/data/markdown/docs`](src/data/markdown/docs) folder. -If you want to know more about how to edit these pages, read more about the [File format](CONTRIBUTING_FILE_FORMAT.md) \ No newline at end of file +If you want to know more about how to edit these pages, read more about the [File format](CONTRIBUTING_FILE_FORMAT.md) diff --git a/CONTRIBUTING_FILE_FORMAT.md b/CONTRIBUTING_FILE_FORMAT.md index 8ec0eadeea..ae34c9fe88 100644 --- a/CONTRIBUTING_FILE_FORMAT.md +++ b/CONTRIBUTING_FILE_FORMAT.md @@ -4,7 +4,7 @@ The k6 documentation is a Gatsby application using React components and markdown There are two types of pages: Welcome Pages and Documentation articles. -[Welcome Pages](src/templates/docs) are the pages shown on the header menu: `Guides`, `Javascript API`, `Cloud Docs`, `Integration`, and `Examples`. They are made as separate React Components for maximum customisation. +[Welcome Pages](src/templates/docs) are the pages shown on the header menu: `Guides`, `JavaScript API`, `Cloud Docs`, `Integration`, and `Examples`. They are made as separate React Components for maximum customisation. Documentation articles are markdown files structured under the [`src/data/markdown/docs`](src/data/markdown/docs) folder. @@ -316,7 +316,7 @@ See _'Then do this?'_ text line between tabs? **You can not do that**. Put nothi ```javascript for (var id = 1; id <= 100; id++) { - http.get(http.url`http://example.com/posts/${id}`) + http.get(http.url`http://example.com/posts/${id}`) } ``` diff --git a/package.json b/package.json index e919d0a64e..3f43346ac5 100644 --- a/package.json +++ b/package.json @@ -13,6 +13,8 @@ "predevelop": "test -f ./.env.development || cp ./.env.example ./.env.development", "develop": "gatsby develop", "lint": "eslint src/", + "prelint:prose": "which vale || (echo 'Requires Vale. See https://docs.errata.ai/vale/install for instructions.' && exit 1)", + "lint:prose": " vale --glob='*.md' ./src/data/markdown", "lint:fix": "eslint src/ --fix", "precheck:links": "gatsby build", "check:links": "concurrently --kill-others -s first \"gatsby serve --port 8000\" \"npm run check:blc\"", @@ -83,4 +85,4 @@ "lint-staged": "^10.2.11", "wait-on": "^5.2.0" } -} +} \ No newline at end of file diff --git a/src/data/markdown/docs/01 guides/01 Getting started/02 Installation.md b/src/data/markdown/docs/01 guides/01 Getting started/02 Installation.md index 37e4af3699..da60b5df1e 100644 --- a/src/data/markdown/docs/01 guides/01 Getting started/02 Installation.md +++ b/src/data/markdown/docs/01 guides/01 Getting started/02 Installation.md @@ -4,7 +4,6 @@ title: 'Installation' ## Linux - ### Debian/Ubuntu
@@ -20,15 +19,15 @@ sudo apt-get install k6

> ### ⚠️ If you are behind a firewall or proxy
>
-> There have been reports of users being unable to download the key from Ubuntu's keyserver using `apt-key`
+> Some users have reported being unable to download the key from Ubuntu's keyserver with the `apt-key`
> command due to firewalls or proxies blocking their requests. If you experience this issue, you may try this
> alternative approach instead:
->
+>
> ```
> wget -q -O - https://bintray.com/user/downloadSubjectPublicKey?username=bintray | sudo apt-key add -
> ```

-### Redhat/CentOS
+### Red Hat/CentOS
diff --git a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output.md b/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output.md index bd36ec5b4b..87a5fcb8c8 100755 --- a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output.md +++ b/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output.md @@ -36,7 +36,7 @@ duration: 1m0s, iterations: - - `duration: 1m0s` the test run [duration](/using-k6/options#duration). - `iterations: -` the total number of VU [iterations](https://k6.io/docs/using-k6/options#iterations). - `vus: 100` the initial number of VUs that test will start running. -- `max: 100` the maximun number of VUs that the test will scale. +- `max: 100` the maximum number of VUs that the test will scale. ### Test summary @@ -97,17 +97,17 @@ k6 can send more granular result data to different outputs to integrate and visu The list of output plugins are: -| Plugin | Usage | -| ----------------------------------------------------- | ------------------------------------------------------------------------------- | -| [Amazon CloudWatch](/results-visualization/amazon-cloudwatch) | `k6 run --out statsd` | -| [Apache Kafka](/results-visualization/apache-kafka) | `k6 run --out kafka` | -| [Cloud](/results-visualization/cloud) | `k6 run --out cloud` | -| [CSV](/results-visualization/csv) | `k6 run --out csv` | -| [Datadog](/results-visualization/datadog) | `k6 run --out datadog` | -| [InfluxDB](/results-visualization/influxdb-+-grafana) | `k6 run --out influxdb` | -| [JSON](/results-visualization/json) | `k6 run --out json` | -| [New Relic](/results-visualization/new-relic) | `k6 run --out statsd` | -| [StatsD](/results-visualization/statsd) | `k6 run --out statsd` | +| Plugin | Usage | +| ------------------------------------------------------------- | ----------------------- | +| [Amazon CloudWatch](/results-visualization/amazon-cloudwatch) | `k6 run --out statsd` | +| [Apache Kafka](/results-visualization/apache-kafka) | `k6 run --out kafka` | +| [Cloud](/results-visualization/cloud) | `k6 run --out cloud` | +| [CSV](/results-visualization/csv) | `k6 run --out csv` | +| [Datadog](/results-visualization/datadog) | `k6 run --out datadog` | +| [InfluxDB](/results-visualization/influxdb-+-grafana) | `k6 run --out influxdb` | +| [JSON](/results-visualization/json) | `k6 run --out json` | +| [New Relic](/results-visualization/new-relic) | `k6 run --out statsd` | +| [StatsD](/results-visualization/statsd) | `k6 run --out statsd` | ## Multiple outputs diff --git a/src/data/markdown/docs/01 guides/01 Getting started/05 Community.md b/src/data/markdown/docs/01 guides/01 Getting started/05 Community.md index c36f82391a..0764410c82 100644 --- a/src/data/markdown/docs/01 guides/01 Getting started/05 Community.md +++ b/src/data/markdown/docs/01 guides/01 Getting started/05 Community.md @@ -40,11 +40,10 @@ channel on [Slack](https://k6.io/slack/). ## Contributing Interested in contributing to the k6 project? We're super-excited to have you! 🥳 -There are multiple ways in which you could contribute to the project, you'll find -some ideas below: +You can contribute to the project in multiple ways. You'll find some ideas below: - Report bugs and features. -- Contributing to the development through open issues on Github If the issue is complex, +- Contributing to the development through open issues on GitHub If the issue is complex, it's usually a good idea to discuss your approach in the issue before you start writing code. 
We have issues labeled as good first issue that have a limited scope. - Blog about k6. Contact us on [Slack](https://k6.io/slack/) if you would like your post to @@ -60,4 +59,4 @@ some ideas below: | I got this error and I'm sure it's a bug | [File an issue](https://github.com/loadimpact/k6/issues) | | Why do you? When will you? | [Slack](https://k6.io/slack/) | | I want to contribute/help with development | Start here, then proceed to [Slack](https://k6.io/slack/) and [issues](https://github.com/loadimpact/k6/issues) | -| I want to write or give a talk about k6 | Reach us on [Slack](https://k6.io/slack/) to see how we can help you | +| I want to write or give a talk about k6 | Reach us on [Slack](https://k6.io/slack/) to see how we can help you | diff --git a/src/data/markdown/docs/01 guides/02 Using k6/02 Metrics.md b/src/data/markdown/docs/01 guides/02 Using k6/02 Metrics.md index 7e4840f8b3..f9d4a247b7 100644 --- a/src/data/markdown/docs/01 guides/02 Using k6/02 Metrics.md +++ b/src/data/markdown/docs/01 guides/02 Using k6/02 Metrics.md @@ -18,7 +18,7 @@ The following _built-in_ metrics will **always** be collected by k6: | Metric Name | Type | Description | | -------------------- | ------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `vus` | Gauge | Current number of active virtual users | -| `vus_max` | Gauge | Max possible number of virtual users (VU resources are preallocated, to ensure performance will not be affected when scaling up the load level) | +| `vus_max` | Gauge | Max possible number of virtual users (VU resources are pre-allocated, to ensure performance will not be affected when scaling up the load level) | | `iterations` | Counter | The aggregate number of times the VUs in the test have executed the JS script (the `default` function). | | `iteration_duration` | Trend | The time it took to complete one full iteration of the default/main function. | | `dropped_iterations` | Counter | Introduced in k6 v0.27.0, the number of iterations that could not be started due to lack of VUs (for the arrival-rate executors) or lack of time (due to expired maxDuration in the iteration-based executors). 
| @@ -28,7 +28,7 @@ The following _built-in_ metrics will **always** be collected by k6: ## HTTP-specific built-in metrics -There are also _built-in_ metrics that will only be generated when/if HTTP requests are made: +_built-in_ metrics will only be generated when/if HTTP requests are made: | Metric Name | Type | Description | | -------------------------- | ------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | @@ -63,7 +63,7 @@ In the above snippet, `res` is an [HTTP Response](/javascript-api/k6-http/respon | ----------------------------- | --------------------------------------------------------------------- | | `res.body` | `string` containing the HTTP response body | | `res.headers` | `object` containing header-name/header-value pairs | -| `res.status` | `integer` contaning HTTP response code received from server | +| `res.status` | `integer` containing HTTP response code received from server | | `res.timings` | `object` containing HTTP timing information for the request in **ms** | | `res.timings.blocked` | = `http_req_blocked` | | `res.timings.connecting` | = `http_req_connecting` | @@ -100,7 +100,7 @@ Custom metrics will be reported at the end of a test. Here is how the output mig ## Metric types -All metrics (both the _built-in_ ones and the custom ones) have a type. There are four different metrics types: +All metrics (both the _built-in_ ones and the custom ones) have a type. The four different metric types in k6 are: - [Counter](/javascript-api/k6-metrics/counter) - [Gauge](/javascript-api/k6-metrics/gauge) diff --git a/src/data/markdown/docs/01 guides/02 Using k6/04 Thresholds.md b/src/data/markdown/docs/01 guides/02 Using k6/04 Thresholds.md index ed4b1efe92..8b54ce9b55 100644 --- a/src/data/markdown/docs/01 guides/02 Using k6/04 Thresholds.md +++ b/src/data/markdown/docs/01 guides/02 Using k6/04 Thresholds.md @@ -190,14 +190,15 @@ Examples: - `p(90) < 300` // 90% of samples must be below 300 A threshold expression evaluates to `true` or `false`. -There are four metric types in k6, and each metric type provides its own set of aggregation methods which can be used in threshold expressions. - -| Metric type | Aggregation methods | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Counter | `count` and `rate` | -| Gauge | `value` | -| Rate | `rate` | -| Trend | `avg`, `min`, `max`, `med` and `p(N)` where `N` is a number between 0.0 and 100.0 meaning the percentile value to look at, eg. `p(99.99)` means the 99.99th percentile. The unit for these values is milliseconds. | + +Each of the four metric types included in k6 provide its own set of aggregation methods usable in threshold expressions. + +| Metric type | Aggregation methods | +| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Counter | `count` and `rate` | +| Gauge | `value` | +| Rate | `rate` | +| Trend | `avg`, `min`, `max`, `med` and `p(N)` where `N` is a number between 0.0 and 100.0 meaning the percentile value to look at, e.g. `p(99.99)` means the 99.99th percentile. 
The unit for these values is milliseconds. | Here is a (slightly contrived) sample script that uses all different types of metrics, and sets different types of thresholds for them: diff --git a/src/data/markdown/docs/01 guides/02 Using k6/05 Options.md b/src/data/markdown/docs/01 guides/02 Using k6/05 Options.md index 430b2fe801..f508ef1217 100644 --- a/src/data/markdown/docs/01 guides/02 Using k6/05 Options.md +++ b/src/data/markdown/docs/01 guides/02 Using k6/05 Options.md @@ -431,7 +431,7 @@ A number specifying a fixed number of iterations to execute of the script, as op a duration of time during which the script would run in a loop. \*Note: The number of iterations is split between all VUs. Available in the `k6 run` and since v0.27.0 in the -`k6 cloud` command as well. Tests that utilize the cloud require a duration as "infinite" tests are not allowed, +`k6 cloud` command as well. Tests that utilize the cloud require a duration as "infinite" tests are not allowed, the default `maxDuration` is 10 minutes when using iterations with the cloud service. | Env | CLI | Code / Config file | Default | @@ -489,27 +489,26 @@ Possible values are: - none - disable - stdout - send to the standard output - stderr - send to the standard error output (this is the default) -- loki - send logs to a loki server +- loki - send logs to a loki server The loki can additionally be configured as follows: `loki=http://127.0.0.1:3100/loki/api/v1/push,label.something=else,label.foo=bar,limit=32,level=info,pushPeriod=5m32s,msgMaxSize=1231` -Where all but the url in the beginning are not required. +Where all but the url in the beginning are not required. The possible keys with their meanings and default values: -| key | meaning | default value | -| ------------- | ------------------------------------------------------------------ | ---------------------------------------- | -| `nothing` | the endpoint to which to send logs | `http://127.0.0.1:3100/loki/api/v1/push` | -| label.`labelName` | adds an additional label with the provided key and value to each message | N/A | -| limit | the limit of message per pushPeriod, an additonal log is send when the limit is reached, logging how many logs were dropped | 100 | -| level | the minimal level of a message so it's send to loki | all | -| pushPeriod | at what period to send log lines | 1s | -| profile | whether to print some info about performance of the sending to loki | false | -| msgMaxSize | how many symbols can there be at most in a message. Messages bigger will miss the middle of the message with an additonal few characters explaining how many characters were dropped. 
| 1048576 | +| key | meaning | default value | +| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------- | +| `nothing` | the endpoint to which to send logs | `http://127.0.0.1:3100/loki/api/v1/push` | +| label.`labelName` | adds an additional label with the provided key and value to each message | N/A | +| limit | the limit of message per pushPeriod, an additional log is send when the limit is reached, logging how many logs were dropped | 100 | +| level | the minimal level of a message so it's send to loki | all | +| pushPeriod | at what period to send log lines | 1s | +| profile | whether to print some info about performance of the sending to loki | false | +| msgMaxSize | how many symbols can there be at most in a message. Messages bigger will miss the middle of the message with an additional few characters explaining how many characters were dropped. | 1048576 | - -| Env | CLI | Code / Config file | Default | -| --------------- | ---------------- | ------------------ | -------- | -| `K6_LOG_OUTPUT` | `--log-output` | N/A | `stderr` | +| Env | CLI | Code / Config file | Default | +| --------------- | -------------- | ------------------ | -------- | +| `K6_LOG_OUTPUT` | `--log-output` | N/A | `stderr` |
@@ -523,13 +522,12 @@ $ k6 run --log-output=stdout script.js A value specifying the log format. By default, k6 includes extra debug information like date and log level. The other options available are: -- `json`: print all the debug information in JSON format. - -- `raw`: print only the log message. +- `json`: print all the debug information in JSON format. +- `raw`: print only the log message. -| Env | CLI | Code / Config file | Default | -| ----------- | ---------------------- | ------------------ | ------- | +| Env | CLI | Code / Config file | Default | +| -------------- | ------------------- | ------------------ | ------- | | `K6_LOGFORMAT` | `--logformat`, `-f` | N/A | |
@@ -694,7 +692,7 @@ export let options = { ### Results Output -Specify the results output. Please go to [Results ouput](/getting-started/results-output) for more information +Specify the results output. Please go to [Results output](/getting-started/results-output) for more information on all output plugins available and how to configure them. Since version 0.21, this option can be specified multiple times. Available in `k6 run` command. @@ -729,9 +727,9 @@ export let options = {
-> #### Cloud runs
+> #### Considerations when running in the cloud
>
-> There are a couple of considerations with this option when running cloud tests. The option is set per load generator which means that the value you set in the options object of your test script will be multiplied by the number of load generators your test run is using. At the moment we are hosting 300 VUs per load generator instance. In practice that means that if you set the option for 100 rps, and run a test with 1000 VUs, you will spin up 4 load gen instances and effective rps limit of your test run will be 400
+> The option is set per load generator, which means that the value you set in the options object of your test script will be multiplied by the number of load generators your test run is using. At the moment we are hosting 300 VUs per load generator instance. In practice, that means that if you set the option to 100 rps and run a test with 1000 VUs, you will spin up 4 load generator instances and the effective rps limit of your test run will be 400.

### Scenarios

@@ -1111,7 +1109,7 @@ export let options = {

A number specifying max number of virtual users, if more than `vus`. This option is typically used
when the intent is to dynamically scale the amount of VUs up and down during the test using the
`k6 scale` command. Since instantiating a VU is an expensive operation in k6 this option
-is used to preallocate `vusMax` number of VUs. Available in `k6 run` and `k6 cloud` commands.
+is used to pre-allocate `vusMax` number of VUs. Available in `k6 run` and `k6 cloud` commands.

| Env          | CLI           | Code / Config file | Default         |
| ------------ | ------------- | ------------------ | --------------- |
diff --git a/src/data/markdown/docs/01 guides/02 Using k6/06 Test life cycle.md b/src/data/markdown/docs/01 guides/02 Using k6/06 Test life cycle.md
index 6459c199bf..4716839cc8 100644
--- a/src/data/markdown/docs/01 guides/02 Using k6/06 Test life cycle.md
+++ b/src/data/markdown/docs/01 guides/02 Using k6/06 Test life cycle.md
@@ -1,11 +1,10 @@
---
-title: "Test life cycle"
-excerpt: ""
+title: 'Test life cycle'
+excerpt: ''
---

-There are four distinct life cycle stages to a k6 test that can be controlled by you, the user.
-They are the "init", "setup", "vu" and "teardown" stages. We also refer to it as "init code",
-"VU code" etc. in the documentation.
+The four distinct life cycle stages in a k6 test are "init", "setup", "vu" and "teardown".
+Throughout the documentation, you will also see us referring to them as "init code", "VU code", etc.
@@ -16,7 +15,7 @@ export function setup() { // 2. setup code } -export default function(data) { +export default function (data) { // 3. vu code } @@ -35,10 +34,11 @@ for your VUs, similar to the `main()` function in many other languages:
```js -export default function() { +export default function () { // do things here... } ``` +
_"Why not just run my script normally, from top to bottom"_, you might ask - the answer is: we @@ -51,7 +51,7 @@ VU code can make HTTP requests, emit metrics, and generally do everything you'd test to do - with a few important exceptions: you can't load anything from your local filesystem, or import any other modules. This all has to be done from the init code. -There are two reasons for this. The first is, of course: performance. +We have two reasons for this. The first is, of course: performance. If you read a file from disk on every single script iteration, it'd be needlessly slow; even if you cache the contents of the file and any imported modules, it'd mean the _first run_ of the @@ -74,7 +74,7 @@ As an added bonus, you can use this to reuse data between iterations (but only f ```javascript var counter = 0; -export default function() { +export default function () { counter++; } ``` @@ -83,21 +83,20 @@ export default function() { ## The default function life-cycle -A VU will execute the default function from start to end in sequence. Nothing out of the ordinary -so far, but here's the important part; once the VU reaches the end of the default function it will +A VU will execute the default function from start to end in sequence. Nothing out of the ordinary +so far, but here's the important part; once the VU reaches the end of the default function it will loop back to the start and execute the code all over. -As part of this "restart" process, the VU is reset. Cookies are cleared and TCP connections +As part of this "restart" process, the VU is reset. Cookies are cleared and TCP connections might be torn down, depending on your test configuration options. -> Make sure to use `sleep()` statements to pace your VUs properly. An appropriate amount of -> sleep/think time at the end of the default function is often needed to properly simulate a -> user reading content on a page. If you don't have a `sleep()` statement at the end of +> Make sure to use `sleep()` statements to pace your VUs properly. An appropriate amount of +> sleep/think time at the end of the default function is often needed to properly simulate a +> user reading content on a page. If you don't have a `sleep()` statement at the end of > the default function your VU might be more "aggressive" than you've planned. > > VU without any `sleep()` is akin to a user who constantly presses F5 to refresh the page. - ## Setup and teardown stages Beyond the required init and VU stages, which is code run for each VU, k6 also supports test-wide @@ -119,7 +118,7 @@ export function setup() { // 2. setup code } -export default function(data) { +export default function (data) { // 3. 
vu code } @@ -151,13 +150,13 @@ export function setup() { return { v: 1 }; } -export default function(data) { +export default function (data) { console.log(JSON.stringify(data)); } export function teardown(data) { if (data.v != 1) { - throw new Error("incorrect data: " + JSON.stringify(data)); + throw new Error('incorrect data: ' + JSON.stringify(data)); } } ``` @@ -172,7 +171,7 @@ stages: ```js export function setup() { - let res = http.get("https://httpbin.org/get"); + let res = http.get('https://httpbin.org/get'); return { data: res.json() }; } @@ -180,7 +179,7 @@ export function teardown(data) { console.log(JSON.stringify(data)); } -export default function(data) { +export default function (data) { console.log(JSON.stringify(data)); } ``` @@ -193,7 +192,8 @@ for the `group` metric tag, so that you can filter them in JSON output or Influx ## Skip setup and teardown execution -There are two CLI options that can be used to skip the execution of setup and teardown stages. +It is possible to skip the execution of setup and teardown stages using the two options `--no-setup` and +`--no-teardown` respectively.
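For illustration only, here is a minimal sketch of a script whose setup and teardown stages could be skipped with those flags (the target URL is just a placeholder):

```javascript
import http from 'k6/http';

// e.g. `k6 run --no-setup --no-teardown script.js` skips stages 2 and 4 below

export function setup() {
  // 2. setup code: runs once for the whole test, not once per VU
  const res = http.get('https://test-api.k6.io/');
  return { setupStatus: res.status };
}

export default function (data) {
  // 3. VU code: `data` is whatever setup() returned (if setup ran)
  http.get('https://test-api.k6.io/');
}

export function teardown(data) {
  // 4. teardown code: runs once at the end of the test
  console.log('data passed from setup: ' + JSON.stringify(data));
}
```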
diff --git a/src/data/markdown/docs/01 guides/02 Using k6/07 Modules.md b/src/data/markdown/docs/01 guides/02 Using k6/07 Modules.md index 92c04b4503..66ffeabbc9 100644 --- a/src/data/markdown/docs/01 guides/02 Using k6/07 Modules.md +++ b/src/data/markdown/docs/01 guides/02 Using k6/07 Modules.md @@ -47,7 +47,7 @@ export default function () { ### Remote HTTP(S) modules -These modules are accessed over HTTP(S), from a source like [the k6 jslib](#the-jslib-repository) or +These modules are accessed over HTTP(S), from a source like [the k6 jsLIB](#the-jslib-repository) or from any publicly accessible web server. The imported modules will be downloaded and executed at runtime, making it extremely important to **make sure the code is legit and trusted before including it in a test script**. @@ -125,7 +125,7 @@ Usually, this is not a big problem as each application only allocates these reso By running code requiring additional features on top of ES5.1, we also need additional extensions to the javascript vm, further boosting the resource usage. This is the default mode of k6. -When bundling using the configuration described in this article, babel and corejs automatically adds the features needed, thus allowing us to run our script without these extensions, using `--compatibility-mode=base`. For more details on the performance benefits of running in the base compability mode, see [this article](/using-k6/javascript-compatibility-mode#performance-comparison). +When bundling using the configuration described in this article, babel and corejs automatically adds the features needed, thus allowing us to run our script without these extensions, using `--compatibility-mode=base`. For more details on the performance benefits of running in the base compatibility mode, see [this article](/using-k6/javascript-compatibility-mode#performance-comparison). ### Setting up the bundler @@ -167,7 +167,7 @@ $ npm install --save-dev \ | ----------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | [webpack](https://github.com/webpack/webpack) | The bundler part of Webpack | | [webpack-cli](https://github.com/webpack/webpack-cli) | The CLI part of Webpack, which allows us to use it from the terminal | -| [k6](https://github.com/loadimpact/k6) | A dummy package used to provide typeahead in VSCode and similar | +| [k6](https://github.com/loadimpact/k6) | A dummy package used to provide type-ahead in VSCode and similar | | [babel-loader](https://github.com/babel/babel-loader) | A loader used by Webpack to leverage babel functionality while bundling | | [@babel/core](https://github.com/babel/babel/tree/master/packages/babel-core) | The core functionality of Babel | | [@babel/preset-env](https://github.com/babel/babel/tree/master/packages/babel-preset-env) | A smart preset using [browserlist](https://github.com/browserslist/browserslist), [compat-table](https://github.com/kangax/compat-table) and [electron-to-chromium](https://github.com/Kilian/electron-to-chromium) to determine what code to transpile and polyfill. 
|
diff --git a/src/data/markdown/docs/01 guides/02 Using k6/09 Cookies.md b/src/data/markdown/docs/01 guides/02 Using k6/09 Cookies.md
index 3891fbb15f..6844939e65 100644
--- a/src/data/markdown/docs/01 guides/02 Using k6/09 Cookies.md
+++ b/src/data/markdown/docs/01 guides/02 Using k6/09 Cookies.md
@@ -22,7 +22,7 @@ For most intents and purposes k6 will transparently manage the receiving, storag
cookies as described above, so that testing of your cookie-based web site or app will just work
without you having to do anything special.

-There are however use cases where more control over cookies is desired. In k6 you have two
+In some use cases, you might want more control over cookies. In k6 you have two
options, [either to directly manipulate HTTP headers](/javascript-api/k6-http/params), or use
the more ergonomic cookie API. We will go through the latter below.

diff --git a/src/data/markdown/docs/01 guides/02 Using k6/10 Protocols/01 HTTP-2.md b/src/data/markdown/docs/01 guides/02 Using k6/10 Protocols/01 HTTP-2.md
index b2da9e91e8..110551a89e 100644
--- a/src/data/markdown/docs/01 guides/02 Using k6/10 Protocols/01 HTTP-2.md
+++ b/src/data/markdown/docs/01 guides/02 Using k6/10 Protocols/01 HTTP-2.md
@@ -1,20 +1,20 @@
---
-title: "HTTP/2"
-excerpt: ""
+title: 'HTTP/2'
+excerpt: ''
---

## Overview

HTTP/2.0 is the latest version of the HTTP protocol and introduces some major improvements compared to its predecessor. Chiefly of which is the introduction of a binary wire protocol with multiplexed streams over a single TCP connection. This solves a long-standing performance issue with HTTP/1.1, [head-of-line blocking](https://en.wikipedia.org/wiki/Head-of-line_blocking).

-Well, it at least *partially* solves it, since you still have TCP congestion control mechanisms interfering with the intended independent nature of the multiplexed streams in cases of lost/dropped packets and retransmission/reassembly. The full solution is to run HTTP/2.0 over UDP, which is what Google implemented with [QUIC](https://en.wikipedia.org/wiki/QUIC).
+Well, it at least _partially_ solves it, since you still have TCP congestion control mechanisms interfering with the intended independent nature of the multiplexed streams in cases of lost/dropped packets and retransmission/reassembly. The full solution is to run HTTP/2.0 over UDP, which is what Google implemented with [QUIC](https://en.wikipedia.org/wiki/QUIC).

## Additional features of HTTP/2.0

-* Builtin compression of HTTP headers
-* Server push
-* Pipelining of requests
-* Prioritization of requests
+- Built-in compression of HTTP headers
+- Server push
+- Pipelining of requests
+- Prioritization of requests

## Load testing HTTP/2 with k6

@@ -23,21 +23,21 @@ When you make HTTP requests in k6 it will automatically upgrade the connection t
```javascript - import http from "k6/http"; - import { check, sleep } from "k6"; - - export default function() { - const res = http.get("https://test-api.k6.io/"); - check(res, { - "protocol is HTTP/2": (r) => r.proto === 'HTTP/2.0' - }); - sleep(1); - } +import http from 'k6/http'; +import { check, sleep } from 'k6'; + +export default function () { + const res = http.get('https://test-api.k6.io/'); + check(res, { + 'protocol is HTTP/2': (r) => r.proto === 'HTTP/2.0', + }); + sleep(1); +} ```
For more information on what values the r.proto field can have, check out:

-* [k6 HTTP](/javascript-api/k6-http/response)
-* https://http2.github.io/http2-spec/#versioning
+- [k6 HTTP](/javascript-api/k6-http/response)
+- https://http2.github.io/http2-spec/#versioning
diff --git a/src/data/markdown/docs/01 guides/02 Using k6/10 Protocols/02 WebSockets.md b/src/data/markdown/docs/01 guides/02 Using k6/10 Protocols/02 WebSockets.md
index 71fdcb8a1e..2d3e1f047a 100644
--- a/src/data/markdown/docs/01 guides/02 Using k6/10 Protocols/02 WebSockets.md
+++ b/src/data/markdown/docs/01 guides/02 Using k6/10 Protocols/02 WebSockets.md
@@ -1,6 +1,6 @@
---
-title: "WebSockets"
-excerpt: ""
+title: 'WebSockets'
+excerpt: ''
---

## Overview

@@ -9,27 +9,27 @@ excerpt: ""

## Load testing WebSockets with k6

-There are some differences in the structure and inner workings of a test when comparing HTTP based to WebSocket based ones. The primary difference is that instead of continuously looping the main function (`export default function() { ... }`) over an over, each VU is now setup to run an asynchronous event loop.
+Compared to HTTP-based tests, WebSocket-based tests differ in both structure and inner workings. The primary difference is that instead of continuously looping the main function (`export default function() { ... }`) over and over, each VU is set up to run an asynchronous event loop.

The basic structure of a WebSocket test looks like this:
```javascript -import ws from "k6/ws"; -import { check } from "k6"; +import ws from 'k6/ws'; +import { check } from 'k6'; -export default function() { - const url = "ws://echo.websocket.org"; - const params = { tags: { my_tag: "hello" } }; +export default function () { + const url = 'ws://echo.websocket.org'; + const params = { tags: { my_tag: 'hello' } }; - const res = ws.connect(url, params, function(socket) { - socket.on("open", () => console.log("connected")); - socket.on("message", data => console.log("Message received: ", data)); - socket.on("close", () => console.log("disconnected")); + const res = ws.connect(url, params, function (socket) { + socket.on('open', () => console.log('connected')); + socket.on('message', (data) => console.log('Message received: ', data)); + socket.on('close', () => console.log('disconnected')); }); - check(res, { "status is 101": r => r && r.status === 101 }); + check(res, { 'status is 101': (r) => r && r.status === 101 }); } ``` @@ -78,35 +78,34 @@ If you want to schedule a recurring action you can use the [socket.setInterval](
```javascript -import ws from "k6/ws"; -import { check } from "k6"; +import ws from 'k6/ws'; +import { check } from 'k6'; -export default function() { - const url = "ws://echo.websocket.org"; - const params = { tags: { my_tag: "hello" } }; +export default function () { + const url = 'ws://echo.websocket.org'; + const params = { tags: { my_tag: 'hello' } }; - const res = ws.connect(url, params, function(socket) { - socket.on("open", function open() { - console.log("connected"); + const res = ws.connect(url, params, function (socket) { + socket.on('open', function open() { + console.log('connected'); socket.setInterval(function timeout() { socket.ping(); - console.log("Pinging every 1sec (setInterval test)"); + console.log('Pinging every 1sec (setInterval test)'); }, 1000); }); - socket.on("ping", () => console.log("PING!")); - socket.on("pong", () => console.log("PONG!")); - socket.on("close", () => console.log("disconnected")); + socket.on('ping', () => console.log('PING!')); + socket.on('pong', () => console.log('PONG!')); + socket.on('close', () => console.log('disconnected')); }); - check(res, { "status is 101": r => r && r.status === 101 }); + check(res, { 'status is 101': (r) => r && r.status === 101 }); } ```
- ## Timeouts You can add a timeout to the WebSocket connection by passing a handler function as well as the @@ -115,24 +114,24 @@ timeout value (in milliseconds) to the [socket.setTimeout](/javascript-api/k6-ws
```javascript -import ws from "k6/ws"; -import { check } from "k6"; +import ws from 'k6/ws'; +import { check } from 'k6'; -export default function() { - const url = "ws://echo.websocket.org"; - const params = { tags: { my_tag: "hello" } }; +export default function () { + const url = 'ws://echo.websocket.org'; + const params = { tags: { my_tag: 'hello' } }; - const res = ws.connect(url, params, function(socket) { - socket.on("open", () => console.log("connected")); - socket.on("close", () => console.log("disconnected")); + const res = ws.connect(url, params, function (socket) { + socket.on('open', () => console.log('connected')); + socket.on('close', () => console.log('disconnected')); - socket.setTimeout(function() { - console.log("2 seconds passed, closing the socket"); + socket.setTimeout(function () { + console.log('2 seconds passed, closing the socket'); socket.close(); }, 2000); }); - check(res, { "status is 101": r => r && r.status === 101 }); + check(res, { 'status is 101': (r) => r && r.status === 101 }); } ``` @@ -147,46 +146,46 @@ You can attach multiple handler functions to an event as the code below illustra
```javascript -import ws from "k6/ws"; -import { check } from "k6"; +import ws from 'k6/ws'; +import { check } from 'k6'; -export default function() { - const url = "ws://echo.websocket.org"; - const params = { tags: { my_tag: "hello" } }; +export default function () { + const url = 'ws://echo.websocket.org'; + const params = { tags: { my_tag: 'hello' } }; - const response = ws.connect(url, params, function(socket) { - socket.on("open", function open() { - console.log("connected"); + const response = ws.connect(url, params, function (socket) { + socket.on('open', function open() { + console.log('connected'); socket.send(Date.now()); socket.setInterval(function timeout() { socket.ping(); - console.log("Pinging every 1sec (setInterval test)"); + console.log('Pinging every 1sec (setInterval test)'); }, 1000); }); - socket.on("ping", () => console.log("PING!")); - socket.on("pong", () => console.log("PONG!")); - socket.on("pong", () => { + socket.on('ping', () => console.log('PING!')); + socket.on('pong', () => console.log('PONG!')); + socket.on('pong', () => { // Multiple event handlers on the same event - console.log("OTHER PONG!"); + console.log('OTHER PONG!'); }); - socket.on("close", () => console.log("disconnected")); + socket.on('close', () => console.log('disconnected')); - socket.on("error", (e) => { - if (e.error() != "websocket: close sent") { - console.log("An unexpected error occured: ", e.error()); + socket.on('error', (e) => { + if (e.error() != 'websocket: close sent') { + console.log('An unexpected error occured: ', e.error()); } }); - socket.setTimeout(function() { - console.log("2 seconds passed, closing the socket"); + socket.setTimeout(function () { + console.log('2 seconds passed, closing the socket'); socket.close(); }, 2000); }); - check(response, { "status is 101": r => r && r.status === 101 }); + check(response, { 'status is 101': (r) => r && r.status === 101 }); } ``` diff --git a/src/data/markdown/docs/01 guides/02 Using k6/10 Protocols/03 SSL-TLS/Online Certificate Status Protocol -OCSP-.md b/src/data/markdown/docs/01 guides/02 Using k6/10 Protocols/03 SSL-TLS/Online Certificate Status Protocol -OCSP-.md index f26834d9b5..1eb50174c1 100644 --- a/src/data/markdown/docs/01 guides/02 Using k6/10 Protocols/03 SSL-TLS/Online Certificate Status Protocol -OCSP-.md +++ b/src/data/markdown/docs/01 guides/02 Using k6/10 Protocols/03 SSL-TLS/Online Certificate Status Protocol -OCSP-.md @@ -1,6 +1,6 @@ --- -title: "Online Certificate Status Protocol (OCSP)" -excerpt: "" +title: 'Online Certificate Status Protocol (OCSP)' +excerpt: '' --- ## What is OCSP? @@ -9,7 +9,7 @@ Online Certificate Status Protocol (OCSP) is a protocol that web browsers and cl to check the status of an issued TLS certificate with a Certificate Authority (CA), making sure it has not been revoked for whatever purpose. -There are different ways in which this can be done, by putting the burden on different parties: +This can be done in different ways, putting the burden on different parties: - The browser/client: talk to the CA (or by CA entrusted OCSP responder) with OCSP. One downside with this approach is that the CA's servers need to be available which might not always be the case. @@ -30,13 +30,13 @@ property of the response object.
```javascript -import http from "k6/http"; -import { check } from "k6"; +import http from 'k6/http'; +import { check } from 'k6'; -export default function() { - let res = http.get("https://stackoverflow.com"); +export default function () { + let res = http.get('https://stackoverflow.com'); check(res, { - "is OCSP response good": r => r.ocsp.status === http.OCSP_STATUS_GOOD + 'is OCSP response good': (r) => r.ocsp.status === http.OCSP_STATUS_GOOD, }); } ``` diff --git a/src/data/markdown/docs/01 guides/02 Using k6/11 Environment variables.md b/src/data/markdown/docs/01 guides/02 Using k6/11 Environment variables.md index ab46604a24..8fb7efc5ac 100644 --- a/src/data/markdown/docs/01 guides/02 Using k6/11 Environment variables.md +++ b/src/data/markdown/docs/01 guides/02 Using k6/11 Environment variables.md @@ -5,12 +5,11 @@ excerpt: '' ## k6 and environment variables -There are two ways in which environment variables can be used with k6: +Environment variables can be used with k6 in two ways: - You can access any environment variables from your k6 script code, and use this to supply your VUs with configuration information. -- There are also a couple of environment variables that k6 will automatically try to read upon - startup, and which will affect its behavior. +- A couple of environment variables are automatically read by k6 on startup, affecting its behavior. ## Accessing environment variables from a script @@ -67,10 +66,10 @@ The environment variable could then be used as follows in a script: import { check, sleep } from 'k6'; import http from 'k6/http'; -export default function() { +export default function () { var r = http.get(`http://${__ENV.MY_HOSTNAME}/`); check(r, { - 'status is 200': r => r.status === 200, + 'status is 200': (r) => r.status === 200, }); sleep(5); } diff --git a/src/data/markdown/docs/01 guides/02 Using k6/12 Execution context variables.md b/src/data/markdown/docs/01 guides/02 Using k6/12 Execution context variables.md index df9ffcfd6c..0dcb072004 100644 --- a/src/data/markdown/docs/01 guides/02 Using k6/12 Execution context variables.md +++ b/src/data/markdown/docs/01 guides/02 Using k6/12 Execution context variables.md @@ -31,8 +31,8 @@ $ k6 run --vus 10 --iterations 100 script.js ## \_\_VU and \_\_ITER -There are a couple of global variables with execution context information that k6 makes available -to the load script, namely **\_\_VU** and **\_\_ITER**. +**\_\_VU** and **\_\_ITER** are both global variables with execution context information that k6 makes +available to the test script. ### \_\_ITER diff --git a/src/data/markdown/docs/01 guides/02 Using k6/13 Session recording - HAR support.md b/src/data/markdown/docs/01 guides/02 Using k6/13 Session recording - HAR support.md index 5a9c6deca6..56bf9ac9f4 100644 --- a/src/data/markdown/docs/01 guides/02 Using k6/13 Session recording - HAR support.md +++ b/src/data/markdown/docs/01 guides/02 Using k6/13 Session recording - HAR support.md @@ -28,7 +28,7 @@ In k6, the process looks like: ## 1. Record a HAR file -There are many browsers and tools that can be used to export HTTP traffic in a HAR format. A few popular ones are: +Multiple browsers and tools can be used to export HTTP traffic in a HAR format. 
A few popular ones are: - [Chrome](https://www.google.com/chrome/) - [Firefox](https://www.mozilla.org/en-US/firefox/) @@ -153,7 +153,7 @@ In a load testing, **correlation** means extracting one or more values from the The recorded HAR file may include dynamic data used on your site - `IDs`, `CSRF tokens`, `VIEWSTATE`, `wpnonce`, and other `dynamic values` - that will be converted into the k6 script. -To run your load test correctly, you may need to replace the hardcoded data with dynamic data that k6 gets from previous requests. For example, tokens will expire quickly and it is one of the most common things that users will correlate from a recorded session. +To run your load test correctly, you may need to replace the hard coded data with dynamic data that k6 gets from previous requests. For example, tokens will expire quickly and it is one of the most common things that users will correlate from a recorded session. [Here](/examples/correlation-and-dynamic-data) are a few examples using the k6 API to correlate dynamic data. diff --git a/src/data/markdown/docs/01 guides/02 Using k6/14 Scenarios.md b/src/data/markdown/docs/01 guides/02 Using k6/14 Scenarios.md index 991a0ad177..53a75182ff 100644 --- a/src/data/markdown/docs/01 guides/02 Using k6/14 Scenarios.md +++ b/src/data/markdown/docs/01 guides/02 Using k6/14 Scenarios.md @@ -11,7 +11,7 @@ hideFromSidebar: false > see the [k6 v0.27.0 release notes](https://github.com/loadimpact/k6/releases/tag/v0.27.0). Scenarios allow us to make in-depth configurations to how VUs and iterations are scheduled, allowing -k6 scenarios enable configuring how VUs and iterations are scheduled, making it possible to model diverse traffic patterns in load tests. There are several benefits of using scenarios: +k6 scenarios enable configuring how VUs and iterations are scheduled, making it possible to model diverse traffic patterns in load tests. Benefits of using scenarios include: - Multiple scenarios can be declared in the same script, and each one can independently execute a different JavaScript function, which makes organizing tests easier @@ -24,7 +24,7 @@ k6 scenarios enable configuring how VUs and iterations are scheduled, making it ## Configuration -Execution scenarios are primarily configured via the `scenarios` key of the the exported `options` object +Execution scenarios are primarily configured via the `scenarios` key of the exported `options` object in your test scripts. The key for each scenario can be an arbitrary, but unique, scenario name. It will appear in the result summary, tags, etc. diff --git a/src/data/markdown/docs/01 guides/02 Using k6/14 Scenarios/01 Executors/06 ramping-arrival-rate.md b/src/data/markdown/docs/01 guides/02 Using k6/14 Scenarios/01 Executors/06 ramping-arrival-rate.md index b261c19602..fd5a4358a6 100644 --- a/src/data/markdown/docs/01 guides/02 Using k6/14 Scenarios/01 Executors/06 ramping-arrival-rate.md +++ b/src/data/markdown/docs/01 guides/02 Using k6/14 Scenarios/01 Executors/06 ramping-arrival-rate.md @@ -31,7 +31,7 @@ would like to ramp the number of iterations up or down during specific periods o ## Examples -In this example, we'll xecute a variable RPS test, starting at 50, ramping up to 200 and then back to 0, over a period of 1 minute. +In this example, we'll execute a variable RPS test, starting at 50, ramping up to 200 and then back to 0, over a period of 1 minute.
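A minimal sketch of such a `ramping-arrival-rate` scenario (the target URL here is just a placeholder) might look like:

```javascript
import http from 'k6/http';

export let options = {
  scenarios: {
    variable_rps: {
      executor: 'ramping-arrival-rate',
      startRate: 50, // start at 50 iterations per second
      timeUnit: '1s',
      preAllocatedVUs: 50,
      maxVUs: 200,
      stages: [
        { target: 200, duration: '30s' }, // ramp up to 200 iterations/s
        { target: 0, duration: '30s' }, // then back down to 0
      ],
    },
  },
};

export default function () {
  http.get('https://test-api.k6.io/');
}
```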
diff --git a/src/data/markdown/docs/01 guides/02 Using k6/14 Scenarios/04 Arrival Rate.md b/src/data/markdown/docs/01 guides/02 Using k6/14 Scenarios/04 Arrival Rate.md index fc6f2fb2b0..6b784b50c1 100644 --- a/src/data/markdown/docs/01 guides/02 Using k6/14 Scenarios/04 Arrival Rate.md +++ b/src/data/markdown/docs/01 guides/02 Using k6/14 Scenarios/04 Arrival Rate.md @@ -64,7 +64,7 @@ slowly a closed model load test will play "nice" and wait, resulting in increase iteration durations and a tapering off of the arrival rate of new VU iterations. This is not ideal when the goal is to simulate a certain arrival rate of new VUs, -or more generally throughput (eg. requests per second). +or more generally throughput (e.g. requests per second). ## Open model @@ -77,8 +77,7 @@ from the iteration duration and the influence of the target system's response ti ![Arrival rate closed/open models](../images/Scenarios/arrival-rate-open-closed-model.png) -In k6 we've implemented this open model with our "arrival rate" executors. There are -two arrival rate executors to chose from for your scenario(s), +In k6, we've implemented this open model with our two "arrival rate" executors: [constant-arrival-rate](/using-k6/scenarios/executors/constant-arrival-rate) and [ramping-arrival-rate](/using-k6/scenarios/executors/ramping-arrival-rate):
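As a minimal sketch of the open model (the URL is just a placeholder), a `constant-arrival-rate` scenario keeps starting new iterations at a fixed rate, regardless of how quickly the target system responds:

```javascript
import http from 'k6/http';

export let options = {
  scenarios: {
    open_model: {
      executor: 'constant-arrival-rate',
      rate: 30, // 30 new iterations started every timeUnit...
      timeUnit: '1s', // ...i.e. 30 iterations per second
      duration: '1m',
      preAllocatedVUs: 50, // VUs set aside up front to sustain the rate
      maxVUs: 100,
    },
  },
};

export default function () {
  http.get('https://test-api.k6.io/');
}
```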
diff --git a/src/data/markdown/docs/01 guides/02 Using k6/19 Javascript Compatibility Mode.md b/src/data/markdown/docs/01 guides/02 Using k6/19 Javascript Compatibility Mode.md index 43512fc101..d399d9b0db 100644 --- a/src/data/markdown/docs/01 guides/02 Using k6/19 Javascript Compatibility Mode.md +++ b/src/data/markdown/docs/01 guides/02 Using k6/19 Javascript Compatibility Mode.md @@ -32,7 +32,7 @@ transformation outside of k6. > ### ⚠️ Disclaimer > > Your mileage may vary while running `--compatibility-mode=base` and also importing external dependencies. For instance, -> `xml2js` and `cheerio` currently dont work, while `lodash` does. +> `xml2js` and `cheerio` currently do not work, while `lodash` does. ### Basic Example diff --git a/src/data/markdown/docs/01 guides/03 Results visualization/04 DataDog.md b/src/data/markdown/docs/01 guides/03 Results visualization/04 DataDog.md index 7a8aa84b9d..e874714bdc 100755 --- a/src/data/markdown/docs/01 guides/03 Results visualization/04 DataDog.md +++ b/src/data/markdown/docs/01 guides/03 Results visualization/04 DataDog.md @@ -73,11 +73,11 @@ The environment variables for the command are: ## Visualize in Datadog -While running the test, k6 sends metrics periodically to DataDog. By default, these metrics have `k6.` as the name prefix. +While running the test, k6 sends metrics periodically to Datadog. By default, these metrics have `k6.` as the name prefix. -You can visualize k6 metrics in realtime with the [metrics explorer](https://docs.datadoghq.com/metrics/explorer/), [monitors](https://docs.datadoghq.com/monitors/), or [custom dashboards](https://docs.datadoghq.com/graphing/dashboards/). +You can visualize k6 metrics in real-time with the [metrics explorer](https://docs.datadoghq.com/metrics/explorer/), [monitors](https://docs.datadoghq.com/monitors/), or [custom dashboards](https://docs.datadoghq.com/graphing/dashboards/). -![Datadog visualizing performance testing metrics](images/DataDog/datadog-performance-testing-metrics.png) +![Datadog visualizing performance testing metrics](images/Datadog/datadog-performance-testing-metrics.png)
@@ -87,7 +87,7 @@ To learn more about all the types of k6 metrics, read the [k6 Metrics guide](/us The first time Datadog detects the `k6.http_reqs` metric, the k6 integration tile is installed automatically, and the default k6 dashboard is added to your dashboard list. -![k6 Datadog Dashboard](images/DataDog/k6-datadog-dashboard.png) +![k6 Datadog Dashboard](images/Datadog/k6-datadog-dashboard.png) Optionally, you can install the k6 integration tile following these instructions: diff --git a/src/data/markdown/docs/01 guides/03 Results visualization/05 InfluxDB - Grafana.md b/src/data/markdown/docs/01 guides/03 Results visualization/05 InfluxDB - Grafana.md index 6b21f327e7..f501ef7c6b 100644 --- a/src/data/markdown/docs/01 guides/03 Results visualization/05 InfluxDB - Grafana.md +++ b/src/data/markdown/docs/01 guides/03 Results visualization/05 InfluxDB - Grafana.md @@ -27,7 +27,6 @@ $ brew install influxdb
- ## Run the test and upload the results to InfluxDB k6 has built-in support for outputting results data directly to an InfluxDB database using @@ -69,13 +68,13 @@ $ brew install grafana
After the installation, you should have an InfluxDB server running on localhost, listening on port 8086, -and a Grafana server on `http://localhost:3000`. Now, we show two different ways to visualize your k6 metrics: +and a Grafana server on `http://localhost:3000`. Now, we show two different ways to visualize your k6 metrics: - [Custom Grafana dashboard](#custom-grafana-dashboard) - [Preconfigured Grafana dashboards](#preconfigured-grafana-dashboards) -## Custom Grafana dashboard +## Custom Grafana dashboard - Open `http://localhost:3000` (or wherever your Grafana installation is located) in your browser. - Create a data source: @@ -92,7 +91,7 @@ and a Grafana server on `http://localhost:3000`. Now, we show two different ways ## Preconfigured Grafana dashboards -Here we will list premade Grafana dashboard configurations contributed by users, for use with k6. +Here we will list pre-made Grafana dashboard configurations contributed by users, for use with k6. - [dcadwallader](https://grafana.com/grafana/dashboards/2587) - [Stian Øvrevåge](https://grafana.com/grafana/dashboards/4411) @@ -133,13 +132,12 @@ Grafana installation in the Docker container. When uploading the k6 results to InfluxDB (`k6 run --out influxdb=`), you can configure other InfluxDB options passing these environment variables: -| InfluxDB Options | Description | Default | -| --------------------------------------- | ---------------------------------------------- | -----------------------| -| `K6_INFLUXDB_USERNAME` | InfluxDB username, optional | | -| `K6_INFLUXDB_PASSWORD` | InfluxDB user password | | -| `K6_INFLUXDB_INSECURE` | If `true`, it will skip https certificate verification | `false` | -| `K6_INFLUXDB_TAGS_AS_FIELDS` | A comma-separated string to set k6 metrics as nonindexable [fields](https://docs.influxdata.com/influxdb/v1.8/concepts/glossary/#field) (instead of tags). An optional type can be specified using `:type` as in `vu:int` will make the field interger. The possible field types are `int`, `bool`, `float` and `string`, which is the default. Example: `vu:int,iter:int,url:string,event_time:int` | | - +| InfluxDB Options | Description | Default | +| ---------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `K6_INFLUXDB_USERNAME` | InfluxDB username, optional | | +| `K6_INFLUXDB_PASSWORD` | InfluxDB user password | | +| `K6_INFLUXDB_INSECURE` | If `true`, it will skip https certificate verification | `false` | +| `K6_INFLUXDB_TAGS_AS_FIELDS` | A comma-separated string to set k6 metrics as non-indexable [fields](https://docs.influxdata.com/influxdb/v1.8/concepts/glossary/#field) (instead of tags). An optional type can be specified using `:type` as in `vu:int` will make the field integer. The possible field types are `int`, `bool`, `float` and `string`, which is the default. 
Example: `vu:int,iter:int,url:string,event_time:int` | | ## See also diff --git a/src/data/markdown/docs/01 guides/03 Results visualization/07 NewRelic.md b/src/data/markdown/docs/01 guides/03 Results visualization/07 NewRelic.md index 444c941af7..38b71af03b 100644 --- a/src/data/markdown/docs/01 guides/03 Results visualization/07 NewRelic.md +++ b/src/data/markdown/docs/01 guides/03 Results visualization/07 NewRelic.md @@ -61,7 +61,7 @@ The _required_ environment variables used in the above command are: | `NR_ACCOUNT_ID` | The Account ID used in New Relic You can find your account ID [here](https://docs.newrelic.com/docs/accounts/accounts-billing/account-setup/account-id#:~:text=If%20you%20have%20a%20single,account%20ID%20is%20displayed%20there.). | | `NR_API_KEY` | The Insert API Key for your New Relic account to send k6 telemetry to the account ID specified above. You can generate an Insert API key [here](https://docs.newrelic.com/docs/insights/insights-data-sources/custom-data/introduction-event-api#register). | -There are also _optional_ environment variables you can use: +_Optional_ environment variables you can use: | Name | Value | | ---------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | @@ -105,7 +105,7 @@ SELECT sum(k6.http_req_duration.sum.percentiles) AS '90th' FROM Metric WHERE per
-**Max, Average, Median Request Duration Duration** +**Max, Median, and Average Request Duration**
diff --git a/src/data/markdown/docs/01 guides/04 Test Types/00 Introduction.md b/src/data/markdown/docs/01 guides/04 Test Types/00 Introduction.md index 38ec4792a0..b9fe55f6e9 100644 --- a/src/data/markdown/docs/01 guides/04 Test Types/00 Introduction.md +++ b/src/data/markdown/docs/01 guides/04 Test Types/00 Introduction.md @@ -3,7 +3,7 @@ title: 'Introduction' excerpt: 'Guide to test types' --- -There are many types of tests that can be performed with k6. Each type serves a different purpose. +You can perform many types of tests with k6, each serving a different purpose. ![Types of performance tests](./images/test-types.png) diff --git a/src/data/markdown/docs/01 guides/05 Testing Guides/01 API load testing.md b/src/data/markdown/docs/01 guides/05 Testing Guides/01 API load testing.md index b6bf18b90e..b81075ac31 100644 --- a/src/data/markdown/docs/01 guides/05 Testing Guides/01 API load testing.md +++ b/src/data/markdown/docs/01 guides/05 Testing Guides/01 API load testing.md @@ -59,9 +59,9 @@ While hammering is good in some cases, e.g. if you want to test endpoints in iso ### User flow (scenarios) testing -In the following example, you see four consecutive [HTTP requests](/using-k6/http-requests) to your API to log in, fetch user profile, update user profile and finally log out. Each request has unique characteristics, accepts some parameters and finally returns a [response](/javascript-api/k6-http/response), which is [check](/using-k6/checks)ed against a set of rules. We also pause after each request and response, for the API to be able to keep up and not be flooded. There are also a set of options at the top, that defines load test [options](/using-k6/options) in your script. +In the following example, you see four consecutive [HTTP requests](/using-k6/http-requests) to your API to log in, fetch user profile, update user profile and finally log out. Each request has unique characteristics, accepts some parameters and finally returns a [response](/javascript-api/k6-http/response), which is [check](/using-k6/checks)ed against a set of rules. We also pause after each request and response, for the API to be able to keep up and not be flooded. At the beginning of the script, we've also added a set of [options](/using-k6/options) that control how the test runs. -The `iterations` is a number that specifies how many iterations (executions) of the script per VU should happen, which is divided by the number of virtual users. The `vus` is a number specifying the number of concurrent sessions (virtual users) to your API. So from `iterations` and `vus` you deduct that you'll likely run each VU around 10 iterations, depending on your API response time and network roundtrip. +`iterations` specifies the total number of iterations (executions) of the script to run, which k6 divides among the virtual users. `vus` specifies the number of concurrent sessions (virtual users) to your API. So from `iterations` and `vus` you can deduce that you'll likely run each VU around 10 iterations, depending on your API response time and network roundtrip. As you can see, this is a fairly normal, yet simple, user flow that tries to mimic the user behavior while using our mobile App or website. For the sake of simplicity, only four requests has been shown, but you can easily add additional requests to be able to have a more realistic user experience. This way you can test the flow of your users' navigation in your application or platform.
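As a minimal sketch of how `vus` and `iterations` interact (the endpoint and numbers are illustrative, and a single request stands in for the longer login/profile/logout flow rather than reproducing it):

```js
import http from 'k6/http';
import { check, sleep } from 'k6';

export let options = {
  vus: 10, // 10 concurrent virtual users
  iterations: 100, // 100 iterations in total, shared across the VUs (~10 each)
};

export default function () {
  // a single request standing in for the longer login/profile/logout flow
  let res = http.get('https://test.k6.io/');
  check(res, { 'status is 200': (r) => r.status === 200 });
  sleep(1); // pause so the API can keep up and isn't flooded
}
```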
This is the point that distinguishes k6 from most of the currently available load testing tools, in that it can be used to test realistic user flows, instead of just relying on hammering a set of endpoints. @@ -142,7 +142,7 @@ The k6 v0.27.0 release includes the [scenarios](/using-k6/scenarios) feature, wh ## Test creation -There are various ways to create a test and eventually run it using k6. You can write your tests in your favorite editor, convert your existing Postman collections and Swagger/OpenAPI specification documents to scripts or use proxy recording as HAR files and convert them to k6 scripts. The recommended way is definitely to write your own scripts. The other tools are available just to help you with onboarding to k6. There's a rich set of tools that can help you create load tests and run them to gain insight into your system. +It is possible to create a test, and eventually run it in k6, in multiple ways. You can write your tests in your favorite editor, convert your existing Postman collections and Swagger/OpenAPI specification documents to scripts or use proxy recording as HAR files and convert them to k6 scripts. The recommended way is definitely to write your own scripts. The other tools are available just to help you with onboarding to k6. There's a rich set of tools that can help you create load tests and run them to gain insight into your system. ![Our tools](./images/our-tools.png) @@ -182,7 +182,7 @@ Originally called [FiddlerToLoadImpact](https://github.com/loadimpact/FiddlerToL As said in the introduction, it is crucial to test your API in advance, rather than relying on the untested API, which may crash at any time and have unexpected consequences. -There are various ways to test your API, each pertaining to a particular test type and each producing a different type of load. +You can test your API in multiple ways, each pertaining to a particular test type and each producing a different type of load. - [Smoke test](/test-types/smoke-testing) - [Load test](/test-types/load-testing) diff --git a/src/data/markdown/docs/01 guides/05 Testing Guides/02 Automated performance testing.md b/src/data/markdown/docs/01 guides/05 Testing Guides/02 Automated performance testing.md index 2d0a58286d..84f91ba9dd 100644 --- a/src/data/markdown/docs/01 guides/05 Testing Guides/02 Automated performance testing.md +++ b/src/data/markdown/docs/01 guides/05 Testing Guides/02 Automated performance testing.md @@ -14,9 +14,9 @@ This guide aims to lay down the steps and best practices for achieving your goal Let’s start by examining why you would consider automating your performance tests. To do that we need to revisit why we run performance tests in the first place: -- **Avoid launch failures** leading to a missed opportunity window and wasted investments, eg. your app or site crashing during a high-profile product launch event. -- **Avoid bad user experiences** leading visitors and customers to go with the competition, and you ultimately losing revenue, eg. churning hard won customers due to non responsive app or website. -- **Avoid performance regressions** as new code changes get deployed to your production system and put infront of end users. This is what this guide is primarily aimed at. +- **Avoid launch failures** leading to a missed opportunity window and wasted investments, e.g. your app or site crashing during a high-profile product launch event. +- **Avoid bad user experiences** leading visitors and customers to go with the competition, and you ultimately losing revenue, e.g. 
churning hard won customers due to non responsive app or website. +- **Avoid performance regressions** as new code changes get deployed to your production system and put in front of end users. This is what this guide is primarily aimed at. From here, the decision to go for automated testing is hopefully straightforward: @@ -71,7 +71,7 @@ The first step to integrating load testing in your CI is to find and install a p We built k6 for automation. It's a CLI tool that can integrate easily into your tech stack. -There are three ways to install k6: +Installing k6 can be done in three different ways: - Using one of the OS specific package managers - Pulling the Docker image @@ -100,7 +100,7 @@ Once you’ve gained enough experience with test creation, we strongly advise yo ## 3. Pass/fail criteria -Every step in an automation pipeline either passes or fails. As mentioned in [Know your goals](/testing-guides/automated-performance-testing#know-your-goals), the mechanism by which k6 decides whether a test has passed or failed is called [thresholds](/using-k6/thresholds). Without your goals codifed as thresholds there's no way for k6 to actually know if your test should be considered a success or failure. +Every step in an automation pipeline either passes or fails. As mentioned in [Know your goals](/testing-guides/automated-performance-testing#know-your-goals), the mechanism by which k6 decides whether a test has passed or failed is called [thresholds](/using-k6/thresholds). Without your goals codified as thresholds there's no way for k6 to actually know if your test should be considered a success or failure. A basic threshold on the 95th percentile of the response time metric looks like this: @@ -127,18 +127,18 @@ export let options = { }; ``` -If the test ends with one or more failed thresholds k6 will exit with a non-zero exit code signalling to the CI tool that the load test step failed, halting the build/pipeline from progressing further, and hopefully notifiying you so you can take corrective action, but more on notifications further down below. +If the test ends with one or more failed thresholds k6 will exit with a non-zero exit code signalling to the CI tool that the load test step failed, halting the build/pipeline from progressing further, and hopefully notifying you so you can take corrective action, but more on notifications further down below. ## 4. Local vs Cloud execution k6 supports both local (`k6 run ...`) and cloud execution (`k6 cloud ...`) modes. In local execution mode k6 will generate all the traffic from the machine where it's being run. In CI this would be the build servers. When executing a test locally, you can optionally stream the results to k6 Cloud for storage and visualization (`k6 run -o cloud ...`). In cloud execution mode k6 will instead bundle up and send the main JS test file, and all dependent files, to k6 Cloud as an archive for execution on cloud infrastructure managed by our k6 Cloud service. The different modes of execution are appropriate for different use cases. 
Some general guidance follows: -| Use case | Execution mode | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------- | -| Load test with <1000 VUs on a machine with consistent dedicated resources | Local execution | -| The target system is behind a firewall and not accessible from the public Internet | Local execution | -| Can't ensure consistent dedicated resources locally for load test, eg. your CI tool is running jobs on machines/containers with varying amounts of resources | Cloud execution | -| Need to run test from multiple geographic locations in a test | Cloud execution | +| Use case | Execution mode | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| Load test with <1000 VUs on a machine with consistent dedicated resources | Local execution | +| The target system is behind a firewall and not accessible from the public Internet | Local execution | +| Can't ensure consistent dedicated resources locally for load test, e.g. your CI tool is running jobs on machines/containers with varying amounts of resources | Cloud execution | +| Need to run test from multiple geographic locations in a test | Cloud execution | ### Authenticating with k6 Cloud @@ -154,7 +154,7 @@ Get your k6 Cloud token from the [account settings page](https://app.k6.io/accou The boring, but true, answer to the question of how often you should run load tests is that "it depends". It depends on a number of parameters. How often is your application changing? Do you need many or few VUs to generate the necessary load? How long is one full cycle of a VU iteration? etc. Testing a full user journey or just a single API endpoint or website page has different implications on the answer as well. -There are three primary factors that will affect what solution is the best for you: +Consider these three factors when picking the best solution for you: - VU iteration duration - Your branching strategy @@ -210,7 +210,7 @@ Generalized, our recommendation is as follows, broken down by VU iteration durat Besides triggering tests based on commit events, we also often see users and customers use [cron](https://k6.io/blog/performance-monitoring-with-cron-and-k6) or CI tool equivalent mechanisms for running tests on off-hours or at a particular cadence. -If you're using k6 Cloud you can use the built in [scheduling feature](/cloud/creating-and-running-a-test/scheduling-tests) to trigger tests at a frequency of your chosing. +If you're using k6 Cloud you can use the built in [scheduling feature](/cloud/creating-and-running-a-test/scheduling-tests) to trigger tests at a frequency of your choosing. **Load test suite** diff --git a/src/data/markdown/docs/01 guides/05 Testing Guides/03 Load testing websites.md b/src/data/markdown/docs/01 guides/05 Testing Guides/03 Load testing websites.md index c33c4a1a59..8f47d5a933 100644 --- a/src/data/markdown/docs/01 guides/05 Testing Guides/03 Load testing websites.md +++ b/src/data/markdown/docs/01 guides/05 Testing Guides/03 Load testing websites.md @@ -21,7 +21,7 @@ Frontend performance focuses on browser metrics like rendering time, interactive Backend performance, on the other hand, focuses mostly on the server response time and the amount of returned errors. -Which one is more important? There is no one true answer to this question. 
In general terms, the [performance golden rule](https://www.stevesouders.com/blog/2012/02/10/the-performance-golden-rule/) states: +Which one is more important? It depends! In general terms, the [performance golden rule](https://www.stevesouders.com/blog/2012/02/10/the-performance-golden-rule/) states: > 80-90% of the end-user response time is spent on the frontend. @@ -71,7 +71,7 @@ You start small by testing, evaluating, and iterating frequently. > > [Simple testing is better than no testing](https://k6.io/our-beliefs#simple-testing-is-better-than-no-testing) -The first thing is to decide what to load test. On the one hand, you could test your **critical services**, the most valuable to your business, and have the most significant risks. On the other hand, test the most **frequent user journeys**. +The first thing is to decide what to load test. On the one hand, you could test your **critical services**, which are the most valuable to your business and carry the most significant risks. On the other hand, test the most **frequent user journeys**. With this information, it’s time to analyze the frequency of usage, business value, performance risks, and any other critical performance aspect of your organization to help you deciding what to load test first. diff --git a/src/data/markdown/docs/01 guides/05 Testing Guides/04 Running large tests.md index 49f46b0e36..84681d4283 100644 --- a/src/data/markdown/docs/01 guides/05 Testing Guides/04 Running large tests.md +++ b/src/data/markdown/docs/01 guides/05 Testing Guides/04 Running large tests.md @@ -47,7 +47,7 @@ If the traffic is constant at 1Gbit/s, your test is probably limited by the netw ### CPU -Unlike many other load testing tools, k6 is heavily multithreaded. It will effectively use all available CPU cores. +Unlike many other load testing tools, k6 is heavily multi-threaded. It will effectively use all available CPU cores. The amount of CPU you need depends on your test files (sometimes called test script). Regardless of the test file, you can assume that large tests require a significant amount of CPU power. @@ -268,7 +268,7 @@ WARN[0013] Request Failed error="Get http://test.k6.io: read tcp 172.31.72 ### context deadline exceeded -Error like this happens when k6 was able to send a request, but the target system didn't respond in time. The default timeout in k6 is 60 seconds. If your system doesn't produce the response in this timeframe, this error will appear. +Errors like this happen when k6 was able to send a request, but the target system didn't respond in time. The default timeout in k6 is 60 seconds. If your system doesn't produce a response in this time frame, this error will appear.
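If the target system legitimately needs longer than the 60-second default, the per-request timeout can be raised through the request [Params](/javascript-api/k6-http/params). Here is a sketch, assuming your k6 version accepts a duration string for `timeout` (older versions take a number of milliseconds) and using a placeholder URL:

```js
import http from 'k6/http';

export default function () {
  // placeholder URL; raise the per-request timeout from the 60s default to 120s
  let res = http.get('https://test.k6.io/slow-endpoint', { timeout: '120s' });
  console.log(res.status);
}
```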
@@ -480,7 +480,7 @@ Note: each VU in k6 is completely independent, and therefore it doesn't share an ## Distributed execution -In load testing, distributed execution refers to running a load test distributed across multiple machines. +In load testing, distributed execution refers to running a load test across multiple machines. Users often look for the distributed execution mode to run large-scale tests. Although we have shown that a single k6 instance can generate enormous load, distributed execution is necessary to: @@ -489,7 +489,6 @@ Users often look for the distributed execution mode to run large-scale tests. Al In k6, you can split the load of a test across multiple k6 instances using the [execution-segment](/using-k6/options#execution-segment) option. For example: -
```bash @@ -517,14 +516,12 @@ k6 run --execution-segment "3/4:1" --execution-segment-sequence "0,1/4,2/4,3 However - at this moment - the distributed execution mode of k6 is not entirely functional. The current limitations are: -- k6 does not provide a `test coordinator` or `master instance` to coordinate the distributed execution of the test. Alternatively, you can use the [k6 REST API](/misc/k6-rest-api) and [--paused](/using-k6/options#paused) to synchronize the multiple k6 instances' execution. +- k6 does not provide a `test coordinator` or `master instance` to coordinate the distributed execution of the test. Alternatively, you can use the [k6 REST API](/misc/k6-rest-api) and [--paused](/using-k6/options#paused) to synchronize the multiple k6 instances' execution. - Each k6 instance evaluates [Thresholds](/using-k6/thresholds) independently - excluding the results of the other k6 instances. If you want to disable the threshold execution, use [--no-thresholds](/using-k6/options#no-thresholds). - k6 reports the metrics individually for each instance. Depending on how you store the load test results, you'll have to aggregate some metrics to calculate them correctly. - > The k6 goal is to support a native open-source solution for distributed execution. If you want to follow the progress, subscribe to the [distributed execution issue](https://github.com/loadimpact/k6/issues/140) on GitHub. - ## Large-scale tests in k6 Cloud Building a load testing infrastructure to support running large-scale distributed tests is a challenging engineering project. @@ -533,7 +530,6 @@ Building a load testing infrastructure to support running large-scale distribute Rolling your own or buying a load testing solution is a decision to consider that depends on your project, the type of testing, your team's expertise, organization's aspects, etc. If you aren't sure which solution is a better fit for your project, reach us on the [Community Forum](https://community.k6.io/) or to the [Cloud Support team](https://k6.io/contact) to help you with your questions. - ## See also - [Fine tuning OS](/misc/fine-tuning-os) diff --git a/src/data/markdown/docs/01 guides/06 Misc/02 IntelliSense.md b/src/data/markdown/docs/01 guides/06 Misc/02 IntelliSense.md index e801f97b14..e4b3290f07 100644 --- a/src/data/markdown/docs/01 guides/06 Misc/02 IntelliSense.md +++ b/src/data/markdown/docs/01 guides/06 Misc/02 IntelliSense.md @@ -1,11 +1,11 @@ --- -title: "IntelliSense" -excerpt: "" +title: 'IntelliSense' +excerpt: '' --- IntelliSense refers to code editing features like **intelligent code completion** and **quick access to documentation**. These features can significantly improve the developer experience and productivity when working on k6 scripts in your editor of choice. -k6 has its [TypeScript Type Definition](https://www.npmjs.com/package/@types/k6) that you can configure with your editor to unlock code editing features. +k6 has its [TypeScript Type Definition](https://www.npmjs.com/package/@types/k6) that you can configure with your editor to unlock code editing features. ## Visual Studio Code @@ -19,7 +19,7 @@ In Visual Studio Code, [IntelliSense](https://code.visualstudio.com/docs/editor/ ### Setup -A way to configure IntelliSense in VS Code to recognize the [k6 Javascript API](/javascript-api) is to install the k6 Types with a package manager. For example: +A way to configure IntelliSense in VS Code to recognize the [k6 JavaScript API](/javascript-api) is to install the k6 Types with a package manager. 
For example: ```shell # create a `package.json` file @@ -29,9 +29,6 @@ $ npm init --yes $ npm install --save-dev @types/k6 ``` - - - ## See also - [Visual Studio Code - k6 Extension](https://marketplace.visualstudio.com/items?itemName=k6.k6) diff --git a/src/data/markdown/docs/01 guides/06 Misc/03 Fine tuning OS.md b/src/data/markdown/docs/01 guides/06 Misc/03 Fine tuning OS.md index 8ebbc71c45..3f92c316de 100644 --- a/src/data/markdown/docs/01 guides/06 Misc/03 Fine tuning OS.md +++ b/src/data/markdown/docs/01 guides/06 Misc/03 Fine tuning OS.md @@ -29,7 +29,7 @@ Below we will look at ways to increase this resource limit, and allow k6 to run **Limit types** -There are two types of resource limits in Unix systems: +Unix systems have two types of resource limits: - hard limits: these are the absolute maximum allowed for each user, and can only be configured by the root user. - soft limits: these can be configured by each user, but cannot be above the hard limit setting. diff --git a/src/data/markdown/docs/01 guides/06 Misc/05 Archive.md b/src/data/markdown/docs/01 guides/06 Misc/05 Archive.md index 4efd325539..aab34cabc0 100644 --- a/src/data/markdown/docs/01 guides/06 Misc/05 Archive.md +++ b/src/data/markdown/docs/01 guides/06 Misc/05 Archive.md @@ -1,14 +1,13 @@ --- -title: "Archive Command" -excerpt: "" +title: 'Archive Command' +excerpt: '' --- ## What is an archive? When the complexity of a k6 test goes beyond a single JS file it quickly becomes cumbersome to find and bundle up all the dependencies (JS, [open()](/javascript-api/init-context/open-filepath-mode)'ed data files, TLS -client certs, etc.). There is a need for a native way to bundle and distribute/share a test. That -is what k6 archives are for. +client certs, etc.). k6 archives are a native way to bundle and distribute, or share, a test. A k6 archive is simply a [tar](https://en.wikipedia.org/wiki/Tar_%28computing%29) file with all files needed to execute a k6 test. @@ -26,7 +25,7 @@ $ k6 run script.js
Now if you replace `run` with `archive` k6 will run the [init stage](/using-k6/test-life-cycle) of -the code to determine which JS files are being imported and what data files are being +the code to determine which JS files are being imported and what data files are being [`open()`](/javascript-api/init-context/open-filepath-mode)'ed and bundles all of the files up into a tar file: @@ -61,24 +60,28 @@ Archive files have a variety of use cases, but they all share the common need to of a test's files into a single file for easy distribution. ### Sharing a test + By bundling up a test into an archive it's easy to share the test with your teammates by simply storing or sending a single tar file. As we saw in the previous section, your teammates can execute the archive by running `k6 run archive.tar`. ### Preparing tests for CI + If you have a complex CI pipeline and your load tests are separated from your application code, you could store k6 archives as build artifacts whenever the load test source code is changed, and then pull in those k6 archives from the artifacts storage for test execution as needed. ### k6 Cloud Execution + k6 offers a commercial service for running large scale and geographically distributed load tests on managed cloud infrastructure. Cloud executed tests are triggered from the k6 command-line via the `k6 cloud script.js` command (similar to `k6 run`) which will trigger an implicit creation of a k6 archive that is uploaded and distributed to k6 cloud load generators for execution. -### Clustered Execution (*future*) +### Clustered Execution (_future_) + In the future (see [our roadmap](https://github.com/loadimpact/k6/wiki/Roadmap)) k6 will support a clustered execution mode that will enable tests to be run across more than one node. This mode of execution is also likely to make use of the archive functionality to @@ -87,7 +90,7 @@ distribute the test files to all participating nodes. ## Contents of an archive file An archive contains the original source of the JS code, any [`open()`](/javascript-api/init-context/open-filepath-mode)'ed -data files, [SSL/TLS client certificates](/using-k6/protocols/ssl-tls/ssl-tls-client-certificates) as well as a +data files, [SSL/TLS client certificates](/using-k6/protocols/ssl-tls/ssl-tls-client-certificates) as well as a `metadata.json` with all the options (a cascading of the options set on the [CLI](/using-k6/options), via [Environment variables](/using-k6/options) and [in-script options](/using-k6/options) (`export let options = {...}`)). @@ -120,7 +123,6 @@ Now, if the current working directory is `/home/johndoe/tests/api-test/` and we file using `-O filename.tar`). The contents of the archive file would look like something like this: -
```text @@ -160,7 +162,7 @@ Breaking down the file structure we get: **metadata.json** The resolved "default" options for this test based on [CLI flags](/using-k6/options), [Environment variables](/using-k6/options) and [in-script options](/using-k6/options). -***scripts*** contains the full original directory tree of all `import`'ed JS dependencies. +**_scripts_** contains the full original directory tree of all `import`'ed JS dependencies.
@@ -221,5 +223,6 @@ Breaking down the file structure we get: ## What an archive file does not contain We try to be cautious with what we include in an archive file. Some things we do to that end: -* We anonymize the username found in any path to JS and data file dependencies -* We don't include and environment variables from the system in the archive + +- We anonymize the username found in any path to JS and data file dependencies +- We don't include and environment variables from the system in the archive diff --git a/src/data/markdown/docs/01 guides/06 Misc/06 Glossary.md b/src/data/markdown/docs/01 guides/06 Misc/06 Glossary.md index bc121fcb47..0a7ef5fe07 100644 --- a/src/data/markdown/docs/01 guides/06 Misc/06 Glossary.md +++ b/src/data/markdown/docs/01 guides/06 Misc/06 Glossary.md @@ -54,7 +54,7 @@ When discussing complex topics, it is usually a good idea to define a clear, sha ### Goja -**Goja** is a javascript runtime, purely written in go, that emphasizes standard compliance and performance. We use goja to allow for test scripting without having to compromise speed, efficiency or reliability, which would have been the case using NodeJS. For more details, see the [Goja repository on GitHub](https://github.com/dop251/goja). +**Goja** is a JavaScript runtime, purely written in go, that emphasizes standard compliance and performance. We use goja to allow for test scripting without having to compromise speed, efficiency or reliability, which would have been the case using NodeJS. For more details, see the [Goja repository on GitHub](https://github.com/dop251/goja). ### Horizontal Scalability diff --git a/src/data/markdown/docs/02 javascript api/02 k6.md b/src/data/markdown/docs/02 javascript api/02 k6.md index 4c2e6a00ab..173ffbf047 100644 --- a/src/data/markdown/docs/02 javascript api/02 k6.md +++ b/src/data/markdown/docs/02 javascript api/02 k6.md @@ -1,13 +1,13 @@ --- -title: "k6" +title: 'k6' --- -The k6 module contains k6-specific functionality. +The k6 module contains k6-specific functionality. -| Function | Description | -| -------- | ----------- | +| Function | Description | +| ------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------- | | [check(val, sets, [tags])](/javascript-api/k6/check-val-sets-tags) | Runs one or more checks on a value and generates a pass/fail result but does not throw errors or otherwise interrupt execution upon failure. | -| [fail([err])](/javascript-api/k6/fail-err) | Throws an error, failing and aborting the current VU script iteration immediately. | -| [group(name, fn)](/javascript-api/k6/group-name-fn) | Runs code inside a group. Used to organize results in a test. | -| [randomSeed(int)](/javascript-api/k6/randomseed-int) | Set seed to get a reproducible pseudorandom number using `Math.random`. | -| [sleep(t)](/javascript-api/k6/sleep-t) | Suspends VU execution for the specified duration. | +| [fail([err])](/javascript-api/k6/fail-err) | Throws an error, failing and aborting the current VU script iteration immediately. | +| [group(name, fn)](/javascript-api/k6/group-name-fn) | Runs code inside a group. Used to organize results in a test. | +| [randomSeed(int)](/javascript-api/k6/randomseed-int) | Set seed to get a reproducible pseudo-random number using `Math.random`. | +| [sleep(t)](/javascript-api/k6/sleep-t) | Suspends VU execution for the specified duration. 
| diff --git a/src/data/markdown/docs/02 javascript api/02 k6/random-seed.md b/src/data/markdown/docs/02 javascript api/02 k6/random-seed.md index 335e773c5e..d6191e485a 100644 --- a/src/data/markdown/docs/02 javascript api/02 k6/random-seed.md +++ b/src/data/markdown/docs/02 javascript api/02 k6/random-seed.md @@ -1,14 +1,13 @@ --- -title: "randomSeed( int )" -description: "Set seed to get a reproducible pseudorandom number using `Math.random`." +title: 'randomSeed( int )' +description: 'Set seed to get a reproducible pseudo-random number using `Math.random`.' --- -Set seed to get a reproducible pseudorandom number using `Math.random`. +Set seed to get a reproducible pseudo-random number using `Math.random`. -| Parameter | Type | Description | -|-----------|--------|-----------------------| -| int | integer | The seed value. | - +| Parameter | Type | Description | +| --------- | ------- | --------------- | +| int | integer | The seed value. | ### Example @@ -17,14 +16,14 @@ Use `randomSeed` to get the same random number in all the iterations.
```js -import { randomSeed } from "k6"; +import { randomSeed } from 'k6'; export const options = { vus: 10, - duration: '5s' -} + duration: '5s', +}; -export default function() { +export default function () { randomSeed(123456789); let rnd = Math.random(); console.log(rnd); diff --git a/src/data/markdown/docs/02 javascript api/06 k6-http/10-batch- requests -.md b/src/data/markdown/docs/02 javascript api/06 k6-http/10-batch- requests -.md index 625fbb3348..0abb766346 100644 --- a/src/data/markdown/docs/02 javascript api/06 k6-http/10-batch- requests -.md +++ b/src/data/markdown/docs/02 javascript api/06 k6-http/10-batch- requests -.md @@ -1,31 +1,28 @@ --- -title: "batch( requests )" -description: "Issue multiple HTTP requests in parallel (like e.g. browsers tend to do)." +title: 'batch( requests )' +description: 'Issue multiple HTTP requests in parallel (like e.g. browsers tend to do).' --- Batch multiple HTTP requests together, to issue them in parallel over multiple TCP connections. -| Parameter | Type | Description | -| --------- | -------------- | ---------------------------------------------------------------- | +| Parameter | Type | Description | +| --------- | --------------- | ---------------------------------------------------------------- | | requests | array \| object | An array or object containing requests, in string or object form | When each request is specified as an array, the order of the arguments for each request is as follows: - ### Returns -| Type | Description | -| ------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Type | Description | +| ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | object | An object containing [Response](/javascript-api/k6-http/response) objects.

It is an array when users pass an array as `requests` and is a normal object with string keys when named requests are used (see below). | - -| Position | Name | Type | Description | -| -------- | ---- | ---- | ----------- | -| 1 | method | string | Mandatory. The HTTP method of the request. One of GET, POST, PUT, PATCH, DELETE, HEAD or OPTION. | -| 2 | url | string | Mandatory. The URL to request. | -| 3 | body (optional) | string \| object | The body of the request if relevant. Can be set to `null` if not applicable but you want to set the last `params` argument. | -| 4 | params (optional) | object | [Params](/javascript-api/k6-http/params) like auth, custom headers and tags. | - +| Position | Name | Type | Description | +| -------- | ----------------- | ---------------- | --------------------------------------------------------------------------------------------------------------------------- | +| 1 | method | string | Mandatory. The HTTP method of the request. One of GET, POST, PUT, PATCH, DELETE, HEAD or OPTION. | +| 2 | url | string | Mandatory. The URL to request. | +| 3 | body (optional) | string \| object | The body of the request if relevant. Can be set to `null` if not applicable but you want to set the last `params` argument. | +| 4 | params (optional) | object | [Params](/javascript-api/k6-http/params) like auth, custom headers and tags. | ### Example with request as an array @@ -35,15 +32,10 @@ When each request is specified as an array, the order of the arguments for each import http from 'k6/http'; import { check } from 'k6'; -export default function() { +export default function () { let responses = http.batch([ ['GET', 'https://test.k6.io', null, { tags: { ctype: 'html' } }], - [ - 'GET', - 'https://test.k6.io/style.css', - null, - { tags: { ctype: 'css' } }, - ], + ['GET', 'https://test.k6.io/style.css', null, { tags: { ctype: 'css' } }], [ 'GET', 'https://test.k6.io/images/logo.png', @@ -52,7 +44,7 @@ export default function() { ], ]); check(responses[0], { - 'main page status was 200': res => res.status === 200, + 'main page status was 200': (res) => res.status === 200, }); } ``` @@ -67,15 +59,10 @@ export default function() { import http from 'k6/http'; import { check } from 'k6'; -export default function() { +export default function () { let responses = http.batch([ ['GET', 'https://test.k6.io', null, { tags: { ctype: 'html' } }], - [ - 'GET', - 'https://test.k6.io/style.css', - null, - { tags: { ctype: 'css' } }, - ], + ['GET', 'https://test.k6.io/style.css', null, { tags: { ctype: 'css' } }], [ 'GET', 'https://test.k6.io/images/logo.png', @@ -84,7 +71,7 @@ export default function() { ], ]); check(responses[0], { - 'main page status was 200': res => res.status === 200, + 'main page status was 200': (res) => res.status === 200, }); } ``` @@ -101,7 +88,7 @@ You can also use objects to hold information about a request. Here is an example import http from 'k6/http'; import { check } from 'k6'; -export default function() { +export default function () { let req1 = { method: 'GET', url: 'https://httpbin.org/get', @@ -124,14 +111,14 @@ export default function() { // httpbin.org should return our POST data in the response body, so // we check the third response object to see that the POST worked. check(responses[2], { - 'form data OK': res => JSON.parse(res.body)['form']['hello'] == 'world!', + 'form data OK': (res) => JSON.parse(res.body)['form']['hello'] == 'world!', }); } ```
-_Note that the requests in the example above may happen in any order, or simultaneously. There is no guarantee that e.g. req1 will happen before req2 or req3_ +_Note that the requests in the example above may happen in any order, or simultaneously. When running requests in batches, there is no guarantee that e.g. req1 will happen before req2 or req3_ ### Example with named requests @@ -143,7 +130,7 @@ Finally, you can also send in named requests by using an object instead of an ar import http from 'k6/http'; import { check } from 'k6'; -export default function() { +export default function () { let requests = { 'front page': 'https://k6.io', 'features page': { @@ -156,7 +143,7 @@ export default function() { // when accessing results, we use the name of the request as index // in order to find the corresponding Response object check(responses['front page'], { - 'front page status was 200': res => res.status === 200, + 'front page status was 200': (res) => res.status === 200, }); } ``` diff --git a/src/data/markdown/docs/02 javascript api/07 k6-metrics.md b/src/data/markdown/docs/02 javascript api/07 k6-metrics.md index 94b73d56fc..4c57eacec6 100644 --- a/src/data/markdown/docs/02 javascript api/07 k6-metrics.md +++ b/src/data/markdown/docs/02 javascript api/07 k6-metrics.md @@ -3,12 +3,12 @@ title: 'k6/metrics' excerpt: 'k6 Custom Metrics API' --- -The metrics module provides functionality to create [custom metrics](/using-k6/metrics) of various types. All metrics (both the [built-in metrics](/using-k6/metrics#built-in-metrics) and the custom ones) have a type. There are four different metrics types, and they are: `Counter`, `Gauge`, `Rate` and `Trend`. +The metrics module provides functionality to create [custom metrics](/using-k6/metrics) of various types. All metrics (both the [built-in metrics](/using-k6/metrics#built-in-metrics) and the custom ones) have a type. All values added to a custom metric can optionally be [tagged](/using-k6/tags-and-groups) which can be useful when analysing the test results. -| Metric type | Description | -| ------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------- | +| Metric type | Description | +| --------------------------------------------- | -------------------------------------------------------------------------------------------------------- | | [Counter](/javascript-api/k6-metrics/counter) | A metric that cumulatively sums added values. | | [Gauge](/javascript-api/k6-metrics/gauge) | A metric that stores the min, max and last values added to it. | | [Rate](/javascript-api/k6-metrics/rate) | A metric that tracks the percentage of added values that are non-zero. | diff --git a/src/data/markdown/docs/02 javascript api/07 k6-metrics/73 Trend.md b/src/data/markdown/docs/02 javascript api/07 k6-metrics/73 Trend.md index a0b11989f9..f1dbecbf26 100644 --- a/src/data/markdown/docs/02 javascript api/07 k6-metrics/73 Trend.md +++ b/src/data/markdown/docs/02 javascript api/07 k6-metrics/73 Trend.md @@ -21,7 +21,7 @@ When `Trend` is used in a threshold expression, there are a range of variables t - `min` for minimum - `max` for maximum - `med` for median -- `p(N)` for specific percentile. `N` is a number between `0.0` and `100.0` meaning the percentile value to look at, eg. `p(99.99)` means the 99.99th percentile. +- `p(N)` for specific percentile. `N` is a number between `0.0` and `100.0` meaning the percentile value to look at, e.g. 
`p(99.99)` means the 99.99th percentile. The unit of these variables and functions are all in milliseconds. diff --git a/src/data/markdown/docs/03 cloud/01 Creating and running a test/01 Test Builder.md b/src/data/markdown/docs/03 cloud/01 Creating and running a test/01 Test Builder.md index 4cdda4444b..854cbd57da 100644 --- a/src/data/markdown/docs/03 cloud/01 Creating and running a test/01 Test Builder.md +++ b/src/data/markdown/docs/03 cloud/01 Creating and running a test/01 Test Builder.md @@ -43,7 +43,7 @@ To add a new request, click `ADD REQUEST`. Your test will execute in the order o To modify requests, move over to the right side of the `REQUESTS` section. You are able to: - Give your request a name to better describe it. -- Change the `HTTP METHOD` by using the drop down prepopulated with `GET`. +- Change the `HTTP METHOD` by using the drop down pre-populated with `GET`. - Change the URL/Endpoint (This is predefine as `http://test.k6.io/` for example purposes) - Specify Headers (If you have imported a HAR file, we will include some Headers here) - Specify Query Parameters diff --git a/src/data/markdown/docs/03 cloud/01 Creating and running a test/02 Running a test from the CLI.md b/src/data/markdown/docs/03 cloud/01 Creating and running a test/02 Running a test from the CLI.md index 1721904039..2f41cfe88d 100644 --- a/src/data/markdown/docs/03 cloud/01 Creating and running a test/02 Running a test from the CLI.md +++ b/src/data/markdown/docs/03 cloud/01 Creating and running a test/02 Running a test from the CLI.md @@ -186,10 +186,10 @@ You have two options to pass the Project ID to k6: When running a k6 test in the cloud we add two tags to all metrics: -| Tag name | Type | Description | -| ------------- | ------ | -------------------------------------------------------------------------------------------------------- | -| `load_zone` | string | The load zone from where the the metric was collected. Values will be of the form: `amazon:us :ashburn`. | -| `instance_id` | number | A unique number representing the ID of a load generator server taking part in the test. | +| Tag name | Type | Description | +| ------------- | ------ | ---------------------------------------------------------------------------------------------------- | +| `load_zone` | string | The load zone from where the metric was collected. Values will be of the form: `amazon:us :ashburn`. | +| `instance_id` | number | A unique number representing the ID of a load generator server taking part in the test. | The cloud tags are automatically added when collecting the test metrics, and they work as regular tags. @@ -264,7 +264,7 @@ When running in the k6 Cloud there will be three additional environment variable | Name | Value | Description | | ----------------- | ------ | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `LI_LOAD_ZONE` | string | The load zone from where the the metric was collected. Values will be of the form: amazon:us :ashburn (see list above). | +| `LI_LOAD_ZONE` | string | The load zone from where the metric was collected. Values will be of the form: amazon:us :ashburn (see list above). | | `LI_INSTANCE_ID` | number | A sequential number representing the unique ID of a load generator server taking part in the test, starts at 0. 
| | `LI_DISTRIBUTION` | string | The value of the "distribution label" that you used in `ext.loadimpact.distribution` corresponding to the load zone the script is currently executed in. | diff --git a/src/data/markdown/docs/03 cloud/01 Creating and running a test/06 Scheduling a test.md b/src/data/markdown/docs/03 cloud/01 Creating and running a test/06 Scheduling a test.md index be82254e53..75ab9da3be 100644 --- a/src/data/markdown/docs/03 cloud/01 Creating and running a test/06 Scheduling a test.md +++ b/src/data/markdown/docs/03 cloud/01 Creating and running a test/06 Scheduling a test.md @@ -22,7 +22,7 @@ You can schedule any of your tests from the page with the performance trending g ## Scheduling options -In both cases, after clicking "Schedule" you are presented with the following options. You are able to run a test now or at a later date. You can also set the execution to repeat on an Hourly, Daily, Weekly, or Monthly interval. You can also control how long the test will run for, either after a set number of occurrences, or after a certain date. There is some very granular control here, so do explore the option. +In both cases, after clicking "Schedule" you are presented with the following options. You are able to run a test now or at a later date. You can also set the execution to repeat on an Hourly, Daily, Weekly, or Monthly interval. You can also control how long the test will run for, either after a set number of occurrences, or after a certain date. The granularity for controlling this is high, so do explore the options. ![Scheduling options](/images/Scheduling-a-test/schedule-options.png) diff --git a/src/data/markdown/docs/03 cloud/02 Analyzing Results/02 Performance Insights.md b/src/data/markdown/docs/03 cloud/02 Analyzing Results/02 Performance Insights.md index c3fb8808ff..833524f085 100644 --- a/src/data/markdown/docs/03 cloud/02 Analyzing Results/02 Performance Insights.md +++ b/src/data/markdown/docs/03 cloud/02 Analyzing Results/02 Performance Insights.md @@ -30,8 +30,8 @@ Errors that occur early on are typically not considered to be performance relate With that in mind, there are a number of non-performance related reasons for errors, which includes, but is not limited to: - You're making invalid requests: - - Invalid URLs, eg. with a typo in it or a hostname that is not in the public DNS system. - - Missing required headers, eg. authentication/authorization headers or user-agent. + - Invalid URLs, e.g. with a typo in it or a hostname that is not in the public DNS system. + - Missing required headers, e.g. authentication/authorization headers or user-agent. - Sending the wrong body data. - You're trying to test a system that's behind a firewall. - You're hitting a rate limit. 
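As one concrete illustration of the missing-headers case, a request that passes the authentication and user-agent headers explicitly might look like the sketch below; the header values, the `API_TOKEN` environment variable and the URL are placeholders for whatever your API actually requires:

```js
import http from 'k6/http';

export default function () {
  let params = {
    headers: {
      // hypothetical token, passed on the command line with: k6 run -e API_TOKEN=... script.js
      Authorization: `Bearer ${__ENV.API_TOKEN}`,
      'User-Agent': 'k6-load-test',
    },
  };
  // placeholder endpoint standing in for your own API
  http.get('https://api.example.com/profile', params);
}
```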
@@ -104,12 +104,12 @@ Groups are meant to organize and provide an overview of your result tests allowi ```javascript import { group } from 'k6'; -export default function() { - group('user flow: returning user', function() { - group('visit homepage', function() { +export default function () { + group('user flow: returning user', function () { + group('visit homepage', function () { // load homepage resources }); - group('login', function() { + group('login', function () { // perform login }); }); diff --git a/src/data/markdown/docs/03 cloud/02 Analyzing Results/06 Analysis Tab.md b/src/data/markdown/docs/03 cloud/02 Analyzing Results/06 Analysis Tab.md index 7814b65eaa..9322cbd705 100644 --- a/src/data/markdown/docs/03 cloud/02 Analyzing Results/06 Analysis Tab.md +++ b/src/data/markdown/docs/03 cloud/02 Analyzing Results/06 Analysis Tab.md @@ -17,7 +17,7 @@ Other things you can do on this tab: - View metrics you added from previous tabs. They will show up below the main chart. - You may also change aggregation of these metrics in this tab or filter on tags - Add small charts to the larger chart by clicking on the "+" in the top right corner of the small charts -- Add additional metrics to the small chart area by clickin `ADD NEW METRIC` in the small chart area +- Add additional metrics to the small chart area by clicking `ADD NEW METRIC` in the small chart area ![Analysis Tab](/images/06-Analysis-Tab/analysis-tab.png) diff --git a/src/data/markdown/docs/03 cloud/02 Analyzing Results/07 Logs Tab.md b/src/data/markdown/docs/03 cloud/02 Analyzing Results/07 Logs Tab.md index c8d9edc575..0ed5b2db06 100644 --- a/src/data/markdown/docs/03 cloud/02 Analyzing Results/07 Logs Tab.md +++ b/src/data/markdown/docs/03 cloud/02 Analyzing Results/07 Logs Tab.md @@ -10,7 +10,7 @@ excerpt: 'The Logs tab allows you to view console logs' > > This feature is new as of version `0.28.0` and is currently in Beta. -When developing a load test, it's often useful to print messages for debugging purposes. +When developing a load test, it's often useful to print messages for debugging purposes. The k6 API supports the following console logging methods: @@ -20,12 +20,12 @@ The k6 API supports the following console logging methods: - `console.warn()` - `console.error()` - Logs can aid you in troubleshooting your test execution. But they should NOT replace the functionality of other k6 APIs. For example, it is often an **anti-pattern** to use `logs` to: -- Track the status of a condition. Instead, use [Checks](/javascript-api/k6/check-val-sets-tags) to assert these conditions. -- Track a variable value during the test execution. Instead, use the [Trend](/javascript-api/k6-metrics/trend) metric. + +- Track the status of a condition. Instead, use [Checks](/javascript-api/k6/check-val-sets-tags) to assert these conditions. +- Track a variable value during the test execution. Instead, use the [Trend](/javascript-api/k6-metrics/trend) metric. 
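For example, instead of logging a status code and a timing value on every iteration, a sketch along these lines uses a check for the condition and a custom Trend metric for the value; the metric name and endpoint are illustrative:

```js
import http from 'k6/http';
import { check } from 'k6';
import { Trend } from 'k6/metrics';

// custom metric instead of console.log-ing the value on every iteration
let waitingTime = new Trend('waiting_time');

export default function () {
  let res = http.get('https://test.k6.io/');

  // assert the condition with a check rather than logging the status code
  check(res, { 'status is 200': (r) => r.status === 200 });

  // track the value over the whole test as a Trend metric
  waitingTime.add(res.timings.waiting);
}
```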
> **Tip** > @@ -40,6 +40,7 @@ The Logs Tab allows you to view and filter log messages in the Cloud Results pag ![Cloud Logs Tab](/images/11-Cloud-Logs/cloud-logs-output-messages.png) In addition to the log messages, the log panel shows context information such as: + - Log date in the local time zone - Load zone: the geographic zone where the load generator server is located - Instance ID: numerical ID of the load generator server taking part in the test @@ -48,8 +49,9 @@ See [how the k6 Cloud injects environment variables](/cloud/creating-and-running ### Filter by log level and load zone -Each Javascript log statement has a severity level: -- **Info**: `console.log` and `console.info`. +Each JavaScript log statement has a severity level: + +- **Info**: `console.log` and `console.info`. - **Debug**: `console.debug`. - **Warning**: `console.warning`. - **Error**: `console.error`. @@ -63,7 +65,8 @@ You can filter messages by severity level and load zone. The load zone filter is Logs are intended to help you in finding script issues and debugging execution anomalies. You should NOT rely on logging to interpret or analyze the performance of your system. For this reason, the cloud logs have some limitations: -- The logs are deleted 3 days after the test execution. + +- The logs are deleted 3 days after the test execution. - The number of log lines is limited to 10 messages per second per server. If this limit is crossed, a warning messages appears showing the number of discarded log lines. ![Cloud Logs Tab Drop Message](/images/11-Cloud-Logs/cloud-logs-output-drop-messages.png) @@ -74,10 +77,8 @@ k6 aims to have a consistent experience when running local and cloud tests. When running cloud tests using the CLI (`k6 cloud`), k6 will print cloud logs on the standard output as it does with your local tests. - ![Cloud Logs Tab in CLI](/images/11-Cloud-Logs/cloud-logs-cli-output.png) If you don't want the cloud logs to be printed on the terminal, add the `--show-logs=false` argument. - -Next, [Test Comparison](/cloud/analyzing-results/test-comparison) \ No newline at end of file +Next, [Test Comparison](/cloud/analyzing-results/test-comparison) diff --git a/src/data/markdown/docs/03 cloud/03 Integrations/04 Token.md b/src/data/markdown/docs/03 cloud/03 Integrations/04 Token.md index d242a6f1c7..d28b41d491 100644 --- a/src/data/markdown/docs/03 cloud/03 Integrations/04 Token.md +++ b/src/data/markdown/docs/03 cloud/03 Integrations/04 Token.md @@ -9,7 +9,7 @@ Below are some examples on how to utilize the token to authenticate.
-> #### Google/Github Single Sign-On Users +> #### Google/GitHub Single Sign-On Users > > For Single Sign-On (SSO) users, `k6 login cloud` requires a k6 cloud account email and password. You will need to create a password by using [Forgot Password](), or you'll instead need to get your API authentication token from the app and supply that explicitly: `k6 login cloud --token YOUR_API_AUTH_TOKEN`. > See below for more information. @@ -48,7 +48,7 @@ This will login to your account, fetch (and create of necessary) your k6 cloud A ## Authenticating with API token -If you're a Google/Github Single Sign-On (SSO) user or if you have a use case where using your k6 cloud account credentials is not appropriate you can choose to enter your k6 cloud API authentication token directly by entering the following command into your terminal: +If you're a Google/GitHub Single Sign-On (SSO) user, or if you have a use case where using your k6 cloud account credentials is not appropriate, you can enter your k6 cloud API authentication token directly by entering the following command into your terminal:
diff --git a/src/data/markdown/docs/03 cloud/03 Integrations/05 Cloud APM.md b/src/data/markdown/docs/03 cloud/03 Integrations/05 Cloud APM.md index f483ddbdab..cfefb23078 100644 --- a/src/data/markdown/docs/03 cloud/03 Integrations/05 Cloud APM.md +++ b/src/data/markdown/docs/03 cloud/03 Integrations/05 Cloud APM.md @@ -9,10 +9,10 @@ k6 Cloud platform supports exporting metrics to APM platforms, thereby enabling Currently, the following platforms are supported: -| Provider | URL(s) | -| --------- | --------------------------- | -| datadog | | -| datadogeu | | +| Provider | URL(s) | +| --------- | ------------------------------------------------------ | +| datadog | [https://www.datadoghq.com](https://www.datadoghq.com) | +| datadogeu | [https://www.datadoghq.eu](https://www.datadoghq.eu) | This list will be expanded in the future. Please [contact us](https://k6.io/contact) if you would like an integration that isn't currently listed. @@ -30,9 +30,9 @@ For maximum flexibility, the APM export functionality is configured on the test- | `resample_rate` | The rate by which the metrics are resampled and sent to the APM provider in seconds. Default is 3 and acceptable values are integers between 1 and 10. | | `include_test_run_id` | If set, the `test_run_id` will be exported per each metric as an extra tag. Default is `false`. | -**Note**: This [guide](https://docs.datadoghq.com/account_management/api-app-keys/) will walk you through creating an `api_key` and an `app_key` on DataDog. Note that the `api_key` and `app_key` for `datadog` won't work on `datadogeu`. +**Note**: This [guide](https://docs.datadoghq.com/account_management/api-app-keys/) will walk you through creating an `api_key` and an `app_key` on Datadog. Note that the `api_key` and `app_key` for `datadog` won't work on `datadogeu`. -The `metrics` parameter allows you to specify built-in and custom metrics to be exported to the APM provider. By default, only the basic [metrics](/using-k6/metrics) listed below are exported. These defaults also match the [official k6 dashboard for DataDog](https://docs.datadoghq.com/integrations/k6/), which you can read more about on [visualization of metrics in DataDog](/results-visualization/datadog#visualize-in-datadog). +The `metrics` parameter allows you to specify built-in and custom metrics to be exported to the APM provider. By default, only the basic [metrics](/using-k6/metrics) listed below are exported. These defaults also match the [official k6 dashboard for Datadog](https://docs.datadoghq.com/integrations/k6/), which you can read more about on [visualization of metrics in Datadog](/results-visualization/datadog#visualize-in-datadog). - data_sent - data_received @@ -58,8 +58,8 @@ export let options = { apm: [ { provider: "datadog", - api_key: "", - app_key: "", + api_key: "", + app_key: "", metrics: ["http_req_sending", "my_rate", "my_gauge", ...], include_default_metrics: true, include_test_run_id: false @@ -77,7 +77,7 @@ Make sure to meet the following requirements, otherwise, we can't guarantee a wo 3. If the APM configuration has errors, (e.g. invalid provider, wrong credentials, etc) the configuration will be ignored, and the test will be executed without the APM functionality. 4. If you provide invalid metrics to the `metrics` field, the test will continue, but the metrics export(s) will not include the invalid metric. 5. The metrics defined in `metrics` are case-sensitive. -6. 
The [official k6 dashboard on DataDog](https://docs.datadoghq.com/integrations/k6/) gives you the ability to filter metrics based on `test_run_id`, but we don't export `test_run_id` as an extra tag by default. If you want to export it, you should set `include_test_run_id` to `true`. +6. The [official k6 dashboard on Datadog](https://docs.datadoghq.com/integrations/k6/) gives you the ability to filter metrics based on `test_run_id`, but we don't export `test_run_id` as an extra tag by default. If you want to export it, you should set `include_test_run_id` to `true`. ## Limitations diff --git a/src/data/markdown/docs/03 cloud/04 Project and Team Management/02 Projects.md b/src/data/markdown/docs/03 cloud/04 Project and Team Management/02 Projects.md index 0edf7d9f72..5a911eb090 100644 --- a/src/data/markdown/docs/03 cloud/04 Project and Team Management/02 Projects.md +++ b/src/data/markdown/docs/03 cloud/04 Project and Team Management/02 Projects.md @@ -1,15 +1,15 @@ --- title: 'Projects' -excerpt: 'Keep your tests and team members organized with projects, a foldering system built into the k6 web app' +excerpt: 'Keep your tests and team members organized with projects, a filing system built into the k6 web app' --- ## Background -Projects are a way to stay organized within your account in k6. In the simpliest terms, projects can be considered a foldering system which you can use to organize your tests. Projects are assigned on a per organization level. Organization owners and admins can invite users to be members of a project. Only Read/Write members can be explicitly restricted from accessing a project. +Projects are a way to stay organized within your account in k6. In the simplest terms, projects can be considered a filing system which you can use to organize your tests. Projects are assigned on a per organization level. Organization owners and admins can invite users to be members of a project. Only Read/Write members can be explicitly restricted from accessing a project. ## Using Projects -Projects are a simple foldering system. They are flexible enough to allow you to use them in a way that makes sense to you, but simple enough to not lose information in a deep nested structure. +Projects can be thought of as a simple filing system. They are flexible enough to allow you to use them in a way that makes sense to you, but simple enough to not lose information in a deep nested structure. Here are some ways we have seen users utilize Projects to stay organized: diff --git a/src/data/markdown/docs/03 cloud/04 Project and Team Management/04 Azure AD SAML SSO.md b/src/data/markdown/docs/03 cloud/04 Project and Team Management/04 Azure AD SAML SSO.md index b0f93a712a..6df6962f16 100644 --- a/src/data/markdown/docs/03 cloud/04 Project and Team Management/04 Azure AD SAML SSO.md +++ b/src/data/markdown/docs/03 cloud/04 Project and Team Management/04 Azure AD SAML SSO.md @@ -5,11 +5,11 @@ excerpt: 'Guide on setting up Azure AD to act as a SAML SSO IdP with k6 Cloud' ## Background -Federated authentication is a must to virtually all organizations beyond a certain size. Microsoft's Active Directory product has been a long time gold standard for managing an enterprise's users and their access permissions, and Azure Active Directory is its direct cloud counterpart. k6 Cloud intergrates with Azure AD to provide organizations with a compliant way to handle on- and offboarding of team members to the service. +Federated authentication is a must to virtually all organizations beyond a certain size. 
Microsoft's Active Directory product has been a long time gold standard for managing an enterprise's users and their access permissions, and Azure Active Directory is its direct cloud counterpart. k6 Cloud integrates with Azure AD to provide organizations with a compliant way to handle on- and offboarding of team members to the service. ## What is SAML? -Security Assertion Markup Language (SAML) is an open standard for exchanging authentication and authorization data between parties, in particular, between an Identity Provider (eg. Azure AD) and a Service Provider (eg. k6 Cloud). SAML is an XML-based markup language for security assertions (statements that service providers use to make access-control decisions). +Security Assertion Markup Language (SAML) is an open standard for exchanging authentication and authorization data between parties, in particular, between an Identity Provider (e.g. Azure AD) and a Service Provider (e.g. k6 Cloud). SAML is an XML-based markup language for security assertions (statements that service providers use to make access-control decisions). Read more over at [Wikipedia](https://en.wikipedia.org/wiki/Security_Assertion_Markup_Language). @@ -17,7 +17,7 @@ Read more over at [Wikipedia](https://en.wikipedia.org/wiki/Security_Assertion_M To setup Azure AD SAML SSO based authentication to k6 Cloud you must have: -1. A [Team plan](https://k6.io/pricing) or above and the SAML SSO addon($), alternatively be on an Enterprise plan. +1. A [Team plan](https://k6.io/pricing) or above and the SAML SSO add-on(\$), alternatively be on an Enterprise plan. 2. An [Azure AD Premium Subscription](https://azure.microsoft.com/en-us/pricing/details/active-directory/). ## Configuration @@ -34,7 +34,7 @@ To setup Azure AD SAML SSO based authentication to k6 Cloud you must have: ![Azure AD New Application Type](images/04-Azure-AD-SAML-SSO/azure-ad-new-application-type.png) -6. Give the application a name, eg. **k6 Cloud**. +6. Give the application a name, e.g. **k6 Cloud**. 7. Click **"Add"**. 
@@ -52,10 +52,10 @@ To setup Azure AD SAML SSO based authentication to k6 Cloud you must have: Setting: - | Property | Value | - | ------------------------------------------ | ----------------------------------- | - | Identifier (Entity ID) | `https://api.k6.io/sso/acs/` | - | Reply URL (Assertion Consumer Service URL) | `https://api.k6.io/sso/acs/` | + | Property | Value | + | ------------------------------------------ | ---------------------------------- | + | Identifier (Entity ID) | `https://api.k6.io/sso/acs/` | + | Reply URL (Assertion Consumer Service URL) | `https://api.k6.io/sso/acs/` | | Logout Url | `https://app.k6.io/account/logout` | Resulting in: @@ -68,13 +68,13 @@ To setup Azure AD SAML SSO based authentication to k6 Cloud you must have: Setting the following user attributes (and clearing the "Namespace" property for each attribute): - | Attribute | Value | - | -------------------------| ------------------------------------------------------------------------- | - | `Unique User Identifier` | `user.userprincipalname` | - | `user.email` | `user.userprincipalname` | - | `user.username` | `user.userprincipalname` | - | `user.first_name` | `user.givenname` | - | `user.last_name` | `user.surname` | + | Attribute | Value | + | ------------------------ | -------------------------------------------------------------------------- | + | `Unique User Identifier` | `user.userprincipalname` | + | `user.email` | `user.userprincipalname` | + | `user.username` | `user.userprincipalname` | + | `user.first_name` | `user.givenname` | + | `user.last_name` | `user.surname` | | `token` | An unique token that you'll be provided with by the k6 Cloud support team. | Resulting in: diff --git a/src/data/markdown/docs/03 cloud/07 Cloud FAQ/01 Pricing.md b/src/data/markdown/docs/03 cloud/07 Cloud FAQ/01 Pricing.md index 2e13b0b36e..814b22180f 100644 --- a/src/data/markdown/docs/03 cloud/07 Cloud FAQ/01 Pricing.md +++ b/src/data/markdown/docs/03 cloud/07 Cloud FAQ/01 Pricing.md @@ -1,15 +1,13 @@ --- -title: "Pricing FAQ" -excerpt: "Frequently asked questions about k6 cloud pricing and subscriptions" +title: 'Pricing FAQ' +excerpt: 'Frequently asked questions about k6 cloud pricing and subscriptions' --- - This document contains some of our most frequently asked questions about pricing. If your question isn't answered here, please contact [support](mailto:support@k6.io). - ## Does k6 have a concept of VU Hours? -No - There is no concept of VU hours for our plans. You can run all your tests up to the maximum limits as defined by your subscription. +No - there is no concept of VU hours for our plans. You can run all your tests up to the maximum limits as defined by your subscription. ## What tests count against my monthly limit? diff --git a/src/data/markdown/docs/03 cloud/07 Cloud FAQ/04 Debugging test scripts.md b/src/data/markdown/docs/03 cloud/07 Cloud FAQ/04 Debugging test scripts.md index 22c3e4d73a..fd53f8172b 100644 --- a/src/data/markdown/docs/03 cloud/07 Cloud FAQ/04 Debugging test scripts.md +++ b/src/data/markdown/docs/03 cloud/07 Cloud FAQ/04 Debugging test scripts.md @@ -1,21 +1,20 @@ --- -title: "What is the best way to debug my load test scripts" -excerpt: "Tips and tricks to help debug your load test scripts efficiently" +title: 'What is the best way to debug my load test scripts' +excerpt: 'Tips and tricks to help debug your load test scripts efficiently' --- ## Background A common task to any development is debugging your code to make sure it's producing the expect output and results. 
k6 utilizes JavaScript as the scripting language for writing your load tests. Because tests are written in real code, you can and should debug your test scripts before running them. Utilize the tips in this document to aid you in speeding up your script development. - ## Tip 1: Use k6 locally to debug your scripts While there is a code editor built into the k6 web application, it has limited debugging abilities. Using k6 locally, you can actually execute your test scripts on a small scale to quickly see how the script executes. - ## Tip 2: Debug locally only. -Building on tip 1 above, you want to avoid streaming your results to our cloud using `-o cloud` and avoid running the test using the cloud service. There are two reasons for this: +Building on tip 1 above, you want to avoid streaming your results to our cloud using `-o cloud` and avoid running the test using the cloud service. +Debugging locally is beneficial for two reasons: 1. Tests that run in or stream to our cloud will count against any limits you may have 2. Execution is slower when streaming or executing in the cloud. We want debugging to be a fast iterative process. @@ -28,8 +27,7 @@ k6 run myScript.js
-When debugging, you'll likely be making many changes as you work through your scripts to ensure they work as expected. The data sent or run from the cloud won't be of much value, so just keep it local until you need to run a larger test. - +When debugging, you'll likely be making many changes as you work through your scripts to ensure they work as expected. The data sent or run from the cloud won't be of much value, so just keep it local until you need to run a larger test. ## Tip 3: Use flags to limit execution when debugging @@ -43,12 +41,11 @@ k6 run myScript.js -i 1 -u 1
-**Note**: 1 Virtual User and 1 iteration is also the default execution for k6. If you have not defined any VUs or iterations in your test, k6 will execute with 1. - +**Note**: 1 Virtual User and 1 iteration is also the default execution for k6. If you have not defined any VUs or iterations in your test, k6 will execute with 1. ## Tip 4: Use builtin debugging options -Sometimes you need to understand more details about the requests being sent and response received. Using `--http-debug` as a flag allows you to do just that. You can also print full response bodies by using `--http-debug="full"` +Sometimes you need to understand more details about the requests being sent and the responses received. Using `--http-debug` as a flag allows you to do just that. You can also print full response bodies by using `--http-debug="full"`.
@@ -60,12 +57,11 @@ k6 run myScript.js --http-debug="full" **Note**: If your test script has a large number of HTTP requests, this will produce a large output. - ## Tip 5: Print info to the terminal window with console.log(); when debugging -Sometimes it's just easier to print some information to the terminal window. Feel free to use `console.log();` to print useful information to the terminal window. Perhaps you want to examine some JSON returned in a response, a specific response body, or even just know if you've correctly entered/exited loops or IF statements. +Sometimes it's just easier to print some information to the terminal window. Feel free to use `console.log();` to print useful information to the terminal window. Perhaps you want to examine some JSON returned in a response, a specific response body, or even just know if you've correctly entered/exited loops or IF statements. -To take the above a step further, consider the following snippet of code. We are making a GET request, saving the response to `res` and then logging the complete response object. Now we can examine it to find exactly what we may be looking for to adapt our test script. +To take the above a step further, consider the following snippet of code. We are making a GET request, saving the response to `res` and then logging the complete response object. Now we can examine it to find exactly what we may be looking for to adapt our test script.
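A minimal sketch of that pattern, reusing the `https://httpbin.org/json` endpoint from the correlation example as a placeholder, could look like this:

```javascript
import http from 'k6/http';

export default function () {
  // Make a GET request and save the response to `res`
  const res = http.get('https://httpbin.org/json');

  // Log the complete response object to inspect its structure
  console.log(JSON.stringify(res, null, 2));
}
```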
diff --git a/src/data/markdown/docs/03 cloud/07 Cloud FAQ/06 What's the Difference Between k6 Cloud's Version 3.0 (Lua) and 4.0(JavaScript).md b/src/data/markdown/docs/03 cloud/07 Cloud FAQ/06 What's the Difference Between k6 Cloud's Version 3.0 (Lua) and 4.0(JavaScript).md index 4c8779e84e..f61b12e057 100644 --- a/src/data/markdown/docs/03 cloud/07 Cloud FAQ/06 What's the Difference Between k6 Cloud's Version 3.0 (Lua) and 4.0(JavaScript).md +++ b/src/data/markdown/docs/03 cloud/07 Cloud FAQ/06 What's the Difference Between k6 Cloud's Version 3.0 (Lua) and 4.0(JavaScript).md @@ -1,6 +1,6 @@ --- title: "What's the Difference Between LoadImpact's Version 3.0 (Lua) and k6 Cloud 4.0(JavaScript)" -excerpt: "A brief overview of the differences between LoadImpact 3.0 (Lua) and k6 Cloud 4.0 (JS/k6) products" +excerpt: 'A brief overview of the differences between LoadImpact 3.0 (Lua) and k6 Cloud 4.0 (JS/k6) products' --- ## Purpose @@ -16,6 +16,7 @@ From a general performance testing perspective the 3.0 and 4.0 products are more 3. You execute your test, metrics data is collected and you are presented with results. ## Differences between 3.0 and 4.0 products + When looking more closely though there are some differences in how you accomplish step 1, 2 and 3 above. A big difference is in the workflow that you can accomplish with each respective product. @@ -71,7 +72,7 @@ export let options = {
-*** +--- ## Lua to JS migration guide @@ -107,7 +108,6 @@ export default function() { In Lua VUs execute the script from top to bottom over and over, while in JS VUs execute the global scope (aka "init code") once to initialize, and then executes the "main function" (`export default function`) over and over: -
```lua linenos @@ -124,14 +124,12 @@ export default function() {
- ## Converting Lua APIs to JS APIs ## Client sleep/think time Below you have examples on how to have a VU sleep or think for a specific amount of time (in the example below for 3 seconds), pausing the VU execution: -
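On the JS side, a minimal sketch of a 3-second pause looks like this:

```javascript
import { sleep } from 'k6';

export default function () {
  // Pause the VU for 3 seconds before the next iteration
  sleep(3);
}
```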
```lua linenos @@ -147,7 +145,6 @@ export default function() {
- ## Making requests To make HTTP requests there are a number of different Lua APIs available. In the end they're all wrappers around the `http.request_batch()` API. Below you can see a comparison for Lua and JS: @@ -183,12 +180,11 @@ export default function() {
- - See the [HTTP API](/using-k6/http-requests) docs for k6 for more information and examples. ## Group requests and logic into transactions/pages -In the 3.0 product there's a concept of pages. Lua code in between calls to `http.page_start()` and `http.page_end()` will be be measured to provide a page load times in the results. The equivalent in JS would be to use [`Groups`](/using-k6/tags-and-groups#groups): + +In the 3.0 product there's a concept of pages. Lua code in between calls to `http.page_start()` and `http.page_end()` will be measured to provide page load times in the results. The equivalent in JS would be to use [`Groups`](/using-k6/tags-and-groups#groups):
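A minimal sketch of the JS side, using the `test.k6.io` test site as a placeholder target:

```javascript
import { group } from 'k6';
import http from 'k6/http';

export default function () {
  // Requests and logic inside the group are measured together,
  // similar to a page in the 3.0 product
  group('visit homepage', function () {
    http.get('https://test.k6.io');
  });
}
```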
@@ -217,12 +213,11 @@ export default function() {
- ## Data store -In the 3.0 product there's a concept of a Datastore. A CSV file that you can upload to the service and then attach to your user scenario for accessing and using the data in your user scenario logic. +In the 3.0 product there's a concept of a datastore: a CSV file that you can upload to the service and then attach to your user scenario, for accessing and using the data in your scenario logic. -In the 4.0 product there's no specific concept of a Datastore, but in k6 you have two different ways to separate test parameterization data from script logic. +In the 4.0 product there's no specific concept of a datastore, but in k6 you have two different ways to separate test parameterization data from script logic. Both of the examples below can be run with: @@ -234,7 +229,6 @@ k6 run --vus 3 --iterations 3 script.js
- ## Use the open() scripting API to open a CSV/JSON/TXT file: more info here: [open](/javascript-api/init-context/open-filepath-mode) @@ -260,7 +254,6 @@ more info here: [open](/javascript-api/init-context/open-filepath-mode)
-
```JavaScript @@ -275,7 +268,6 @@ export default function() {
- ## Put the data in a JS file and import it as a module:
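As a rough sketch of this pattern (the file name `users.js` and the user fields are placeholders only), the data module could look like:

```javascript
// users.js
export let users = [
  { username: 'admin', password: 'secret' },
  { username: 'test_user', password: 'secret' },
];
```

and the main script imports it:

```javascript
// script.js
import { sleep } from 'k6';
import { users } from './users.js';

export default function () {
  console.log(users[0].username);
  sleep(3);
}
```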
@@ -301,7 +293,6 @@ export let users = [ ## Main Script: -
```JavaScript @@ -313,6 +304,7 @@ export default function() { sleep(3); } ``` +
## Custom metrics @@ -342,5 +334,4 @@ export default function() {
- For more information, see our docs on [custom metrics](/using-k6/metrics#custom-metrics) (Additional metrics for `Counter`, `Gauge` and `Rate` are available beyond the `Trend` one used above). diff --git a/src/data/markdown/docs/03 cloud/07 Cloud FAQ/11 Test status codes.md b/src/data/markdown/docs/03 cloud/07 Cloud FAQ/11 Test status codes.md index 364727ec41..f89ceca1d4 100644 --- a/src/data/markdown/docs/03 cloud/07 Cloud FAQ/11 Test status codes.md +++ b/src/data/markdown/docs/03 cloud/07 Cloud FAQ/11 Test status codes.md @@ -1,83 +1,93 @@ --- -title: "Test Status codes" -excerpt: "List of the different test statuses that can be returned by k6 cloud service, with reasons and fixes for dealing with such a status." +title: 'Test Status codes' +excerpt: 'List of the different test statuses that can be returned by k6 cloud service, with reasons and fixes for dealing with such a status.' --- ## Purpose -Explanation of the different test statuses in k6 along with the code returned. The code returned here is different than what is returned by k6. - -Status | Description -------|------------------------ --2 | Created --1 | Validated -0 | Queued -1 | Initializing -2 | Running -3 | Finished -4 | Timed out -5 | Aborted by user -6 | Aborted by system -7 | Aborted by script error -8 | Aborted by threshold -9 | Aborted by limit - - - -Every successful test, will go through the following statuses. The time from Created -> Running, is typically very short and hardly noticeable as you use the platform. +Explanation of the different test statuses in k6 along with the code returned. The code returned here is different from what is returned by k6. + +| Status | Description | +| ------ | ----------------------- | +| -2 | Created | +| -1 | Validated | +| 0 | Queued | +| 1 | Initializing | +| 2 | Running | +| 3 | Finished | +| 4 | Timed out | +| 5 | Aborted by user | +| 6 | Aborted by system | +| 7 | Aborted by script error | +| 8 | Aborted by threshold | +| 9 | Aborted by limit | + +Every successful test will go through the following statuses. The time from Created -> Running is typically very short and hardly noticeable as you use the platform. ## Created + A test that is newly created, but has not yet been validated. ## Validated + A test which has finished initial validation, but has not been queued to run yet. ## Queued -A test which has entered our queue. Once it is picked up by a test worker, it will begin initializing. + +A test which has entered our queue. Once it is picked up by a test worker, it will begin initializing. ## Initializing + A test which has been assigned to Load Generators, but has not yet started to make HTTP requests. ## Running + A test which is actively making HTTP(s) or websocket requests ## Finished -A test which has finished running. If thresholds were used, no thresholds have failed. -*** +A test which has finished running. If thresholds were used, no thresholds have failed. -When a does not finish as expected, you the test will have one of the following statues. +--- +When a test does not finish as expected, it will have one of the following statuses. ## Timed Out + A test which has not received or sent any information for a long time ## Aborted (by user) -A test which was aborted by the user. Tests aborted by user count against your total usage. + +A test which was aborted by the user. Tests aborted by the user count against your total usage. ## Aborted (by system) -A test that was aborted by the system. These tests typically abort due to a fatal error occuring. 
If the test fails before launch, there may be an underlying issue with the Load Zone, unrelated to k6. If the test aborts during execution, it may be due to overutilization of the Load Generators. In this case, we suggest you look at the CPU and Memory utilization and add or increase sleep times. You may also want to set the option `discardRepsonseBodies` to `true`, to lower memory pressure. + +A test that was aborted by the system. These tests typically abort due to a fatal error occurring. If the test fails before launch, there may be an underlying issue with the Load Zone, unrelated to k6. If the test aborts during execution, it may be due to overutilization of the Load Generators. In this case, we suggest you look at the CPU and Memory utilization and add or increase sleep times. You may also want to set the option `discardResponseBodies` to `true`, to lower memory pressure. ## Aborted (script error) -A test that was aborted due to an error in your script. For example, if you were to capture data from the response body of a request that you reuse in a future request. If the first request were to fail, your future request would contain a null value. Sudden script errors can suggest a performance issue. Fix the performance issue or add error handling to account for these cases. + +A test that was aborted due to an error in your script. For example, suppose you capture data from the response body of one request and reuse it in a later request. If the first request fails, the later request would contain a null value. Sudden script errors can suggest a performance issue. Fix the performance issue or add error handling to account for these cases. ## Aborted (by threshold) + A test that exceeded your defined threshold value and that threshold was given the option to automatically abort the test. ## Aborted (by limit) + A test that has exceeded one or more of the following limits: -- There are "too many" (>40) groups in a test -- There are "too many" (>10,000) metrics reported -- The duration is longer than 60 mins (for tests longer than 60 min, please contact us) + +- The test contains too many groups (>40) +- The test reports too many metrics (>10,000) +- The duration is longer than 60 minutes (for tests longer than 60 minutes, please contact us) - The max VUs is higher than 20,000 VUs (for tests higher than 20k, please contact us) -If your test has too many groups, please reduce their number. If your test has too many metrics, please use URL grouping to combine similar URLs. You should also remove external requests from your test script. Each URL captured will account for 7 individual metrics that we keep track of. External requests can quickly produce a large number of metrics that aren't helpful to the understanding performance of the System Under Test. +If your test has too many groups, please reduce their number. If your test has too many metrics, please use URL grouping to combine similar URLs. You should also remove external requests from your test script. Each URL captured will account for 7 individual metrics that we keep track of. External requests can quickly produce a large number of metrics that aren't helpful to understanding the performance of the System Under Test. ## Uploading results When you send the [k6 results to the k6 Cloud](/results-visualization/cloud), data will be continuously streamed to the cloud. While this happens the state of the test run will be marked as `Running`. A test run that ran its course will be marked `Finished`. 
The run state has nothing to do with the test passing any thresholds, only that the test itself is operating correctly. -If you deliberately abort your test (e.g. by pressing *Ctrl-C*), it will still be considered `Finished`. You can still look and analyze the test data you streamed so far. The test will just have run shorter than originally planned. +If you deliberately abort your test (e.g. by pressing _Ctrl-C_), it will still be considered `Finished`. You can still look and analyze the test data you streamed so far. The test will just have run shorter than originally planned. Another possibility would be if you lose network connection with the k6 Cloud while your test is running. In that case the k6 Cloud will patiently wait for you to reconnect. In the meanwhile your test's run state will continue to appear as `Running` on the web app. diff --git a/src/data/markdown/docs/04 integrations/02 Results visualization/04 datadog.md b/src/data/markdown/docs/04 integrations/02 Results visualization/04 datadog.md index 4706ef7230..4568c07000 100644 --- a/src/data/markdown/docs/04 integrations/02 Results visualization/04 datadog.md +++ b/src/data/markdown/docs/04 integrations/02 Results visualization/04 datadog.md @@ -1,4 +1,4 @@ --- -title: 'DataDog' +title: 'Datadog' redirect: 'https://k6.io/docs/results-visualization/datadog' --- diff --git a/src/data/markdown/docs/05 Examples/01 Examples/02 http-authentication.md b/src/data/markdown/docs/05 Examples/01 Examples/02 http-authentication.md index 4a7c75976f..1cd5b9b0ab 100644 --- a/src/data/markdown/docs/05 Examples/01 Examples/02 http-authentication.md +++ b/src/data/markdown/docs/05 Examples/01 Examples/02 http-authentication.md @@ -118,7 +118,7 @@ does not support this authentication mechanism out of the box, so we'll have to a Node.js library called [awsv4.js](https://github.com/mhart/aws4) and [Browserify](http://browserify.org/) (to make it work in k6). -There are a few steps required to make this work: +For this to work, we first need to do the following: 1. Make sure you have the necessary prerequisites installed: [Node.js](https://nodejs.org/en/download/) and [Browserify](http://browserify.org/) diff --git a/src/data/markdown/docs/05 Examples/01 Examples/04 correlation-and-dynamic-data.md b/src/data/markdown/docs/05 Examples/01 Examples/04 correlation-and-dynamic-data.md index 029e4c4ce4..4af7555869 100644 --- a/src/data/markdown/docs/05 Examples/01 Examples/04 correlation-and-dynamic-data.md +++ b/src/data/markdown/docs/05 Examples/01 Examples/04 correlation-and-dynamic-data.md @@ -34,7 +34,7 @@ and can be handled with a little bit of scripting. import http from 'k6/http'; import { check } from 'k6'; -export default function() { +export default function () { // Make a request that returns some JSON data let res = http.get('https://httpbin.org/json'); @@ -43,8 +43,8 @@ export default function() { // navigating the JSON data as a JS object with dot notation. let slide1 = res.json().slideshow.slides[0]; check(slide1, { - 'slide 1 has correct title': s => s.title === 'Wake up to WonderWidgets!', - 'slide 1 has correct type': s => s.type === 'all', + 'slide 1 has correct title': (s) => s.title === 'Wake up to WonderWidgets!', + 'slide 1 has correct type': (s) => s.type === 'all', }); // Now we could use the "slide1" variable in subsequent requests... 
@@ -61,8 +61,8 @@ export default function() { ### Extracting values/tokens from form fields -There are primarily two different ways you can choose from when deciding how to handle form -submissions. Either you use the higher-level [Response.submitForm([params])](/javascript-api/k6-http/response/response-submitform-params) API +You can choose from two different approaches when deciding how to handle form submissions. +Either you use the higher-level [Response.submitForm([params])](/javascript-api/k6-http/response/response-submitform-params) API or you extract necessary hidden fields etc. and build a request yourself and then send it using the appropriate `http.*` family of APIs, like [http.post(url, [body], [params])](/javascript-api/k6-http/post-url-body-params). @@ -106,5 +106,5 @@ export default function() { **Relevant k6 APIs**: - [Selection.find(selector)](/javascript-api/k6-html/selection/selection-find-selector) (the [jQuery Selector API](http://api.jquery.com/category/selectors/) - docs are also a good resource on what possible selector queryies can be made) + docs are also a good resource on what possible selector queries can be made) - [Selection.attr(name)](/javascript-api/k6-html/selection/selection-attr-name) diff --git a/src/data/markdown/docs/05 Examples/01 Examples/05 data-parameterization.md b/src/data/markdown/docs/05 Examples/01 Examples/05 data-parameterization.md index bfadf8e21b..6e1b5e7648 100644 --- a/src/data/markdown/docs/05 Examples/01 Examples/05 data-parameterization.md +++ b/src/data/markdown/docs/05 Examples/01 Examples/05 data-parameterization.md @@ -60,7 +60,7 @@ export default function () { As k6 doesn't support parsing CSV files natively, we'll have to resort to using a library called [Papa Parse](https://www.papaparse.com/). -You can download the library and and import it locally like this: +You can download the library and import it locally like this:
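A minimal sketch of that local import and a parse call, assuming the downloaded library is saved as `papaparse.js` next to the script and using a placeholder `users.csv` file with a `username` column:

```javascript
import papaparse from './papaparse.js';

// Load and parse the CSV file once, in the init context
const csvData = papaparse.parse(open('./users.csv'), { header: true }).data;

export default function () {
  console.log(csvData[0].username);
}
```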
@@ -186,7 +186,7 @@ For direct comparison 100VUs used near 2GB of memory. Playing with the value for `splits` will give a different balance between memory used and the amount of data each VU has. -A second approach using another technique will be to pre split the data in different files and load and parse only the one for each VU. +A second approach is to pre-split the data into different files and have each VU load and parse only its own file.
diff --git a/src/data/markdown/docs/05 Examples/01 Examples/09 data-uploads.md b/src/data/markdown/docs/05 Examples/01 Examples/09 data-uploads.md index a37ebe74f6..2b49d4b4c4 100644 --- a/src/data/markdown/docs/05 Examples/01 Examples/09 data-uploads.md +++ b/src/data/markdown/docs/05 Examples/01 Examples/09 data-uploads.md @@ -1,14 +1,14 @@ --- -title: "Data Uploads" -excerpt: "Scripting examples on how to execute a load test that will upload a file to the System Under Test(SUT)." +title: 'Data Uploads' +excerpt: 'Scripting examples on how to execute a load test that will upload a file to the System Under Test(SUT).' --- Example to execute a load test that will upload a file to the System Under Test(SUT). ## The open() function -There is a builtin function, [`open()`](/javascript-api/init-context/open-filepath-mode), -that given a file or a URL will return its contents. +Using the built-in function, [`open()`](/javascript-api/init-context/open-filepath-mode), +we are able to read the contents of a file given a filename or URL. Below is a simple example showing how to load the contents of a local file `data.json`. @@ -25,11 +25,9 @@ Below is a simple example showing how to load the contents of a local file `data
```js -const data = JSON.parse( - open("./data.json") -); +const data = JSON.parse(open('./data.json')); -export default function() { +export default function () { console.log(data.my_key); } ``` @@ -59,18 +57,18 @@ below):
```js -import http from "k6/http"; -import { sleep } from "k6"; +import http from 'k6/http'; +import { sleep } from 'k6'; -let binFile = open("/path/to/file.bin", "b"); +let binFile = open('/path/to/file.bin', 'b'); -export default function() { +export default function () { var data = { - field: "this is a standard form field", - file: http.file(binFile, "test.bin") + field: 'this is a standard form field', + file: http.file(binFile, 'test.bin'), }; - var res = http.post("https://example.com/upload", data); + var res = http.post('https://example.com/upload', data); sleep(3); } ``` @@ -84,8 +82,7 @@ or any of the other HTTP request functions, where one of the property values is [FileData](/javascript-api/k6-http/filedata) a multipart request will be constructed and sent. - ### Relevant k6 APIs + - [open(filePath, [mode])](/javascript-api/init-context/open-filepath-mode) - [http.file(data, [filename], [contentType])](/javascript-api/k6-http/file-data-filename-contenttype) - diff --git a/src/data/markdown/docs/05 Examples/01 Examples/11 generating-uuids.md b/src/data/markdown/docs/05 Examples/01 Examples/11 generating-uuids.md index 62090f3add..7db8d85340 100644 --- a/src/data/markdown/docs/05 Examples/01 Examples/11 generating-uuids.md +++ b/src/data/markdown/docs/05 Examples/01 Examples/11 generating-uuids.md @@ -1,6 +1,6 @@ --- -title: "Generating UUIDs" -excerpt: "Scripting example on how to generate UUIDs in your load test." +title: 'Generating UUIDs' +excerpt: 'Scripting example on how to generate UUIDs in your load test.' --- Scripting example on how to generate UUIDs in your load test. @@ -9,11 +9,12 @@ Note that if you don't need v1 UUIDs, consider using the `uuidv4` function from the [k6 JS lib repository](https://jslib.k6.io/). ## Generate v1 and v4 UUIDs + Universally unique identifiers are handy in many scenarios, as k6 doesn't have built-in support for UUIDs, we'll have to resort to using a Node.js library called [uuid](https://www.npmjs.com/package/uuid) and [Browserify](http://browserify.org/) (to make it work in k6). -There are a few steps required to make this work: +For this to work, we first need to go through a few required steps: 1. Make sure you have the necessary prerequisites installed: [Node.js](https://nodejs.org/en/download/) and [Browserify](http://browserify.org/) @@ -21,11 +22,12 @@ There are a few steps required to make this work: 2. Install the `uuid` library:
- ```shell - $ npm install uuid@3.4.0 - ``` + ```shell + $ npm install uuid@3.4.0 + ```
+ 3. Run it through browserify:
@@ -46,22 +48,21 @@ There are a few steps required to make this work:
- Here's an example generating a v1 and v4 UUID:
```js -import uuid from "./uuid.js"; +import uuid from './uuid.js'; -export default function() { - // Generate a UUID v1 - let uuid1 = uuid.v1(); - console.log(uuid1); +export default function () { + // Generate a UUID v1 + let uuid1 = uuid.v1(); + console.log(uuid1); - // Generate a UUID v4 - let uuid4 = uuid.v4(); - console.log(uuid4); + // Generate a UUID v4 + let uuid4 = uuid.v4(); + console.log(uuid4); } ``` diff --git a/src/data/markdown/docs/06 Cloud REST API/01 Cloud REST API/04 Tests.md b/src/data/markdown/docs/06 Cloud REST API/01 Cloud REST API/04 Tests.md index 4bcc6b44cc..17d648354b 100644 --- a/src/data/markdown/docs/06 Cloud REST API/01 Cloud REST API/04 Tests.md +++ b/src/data/markdown/docs/06 Cloud REST API/01 Cloud REST API/04 Tests.md @@ -4,43 +4,43 @@ excerpt: '' draft: 'true' --- - ## List tests -Returns all tests within a specified project (if project is not specified then default project will be used). You may sort or query the results with the specfied options. +Returns all tests within a specified project (if project is not specified then default project will be used). You may sort or query the results with the specified options. **GET** `/loadtests/v2/tests` -| Query Parameters | Type | Description | Example | -| ----------| ---- | ----------- | ---------- | -| project_id | integer | Returns tests associated with a given project_id | `/loadtests/v2/tests?project_id={project_id}` | -| ids[] | integer | Returns tests with given ids. | `/loadtests/v2/tests?ids[]={id_1}&ids[]={id_2}` | -| order_by | string | Allows you to select the parameter to use to order the returned tests. Available parameter values: `id`, `name`, `last_run_time`. | `/loadtests/v2/tests?project_id={project_id}&order_by=last_run_time` | -| q | string | Returns tests containing specified string in their `name` field. | `/loadtests/v2/tests?project_id={project_id}&q=some_string` | -| page | integer | A page number within the paginated result set. | `/loadtests/v2/tests?project_id={project_id}&page=2&page_size=5` | -| page_size | integer | Number of results to return per page. | `/loadtests/v2/tests?project_id={project_id}&page=2&page_size=5` | - +| Query Parameters | Type | Description | Example | +| ---------------- | ------- | --------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------- | +| project_id | integer | Returns tests associated with a given project_id | `/loadtests/v2/tests?project_id={project_id}` | +| ids[] | integer | Returns tests with given ids. | `/loadtests/v2/tests?ids[]={id_1}&ids[]={id_2}` | +| order_by | string | Allows you to select the parameter to use to order the returned tests. Available parameter values: `id`, `name`, `last_run_time`. | `/loadtests/v2/tests?project_id={project_id}&order_by=last_run_time` | +| q | string | Returns tests containing specified string in their `name` field. | `/loadtests/v2/tests?project_id={project_id}&q=some_string` | +| page | integer | A page number within the paginated result set. | `/loadtests/v2/tests?project_id={project_id}&page=2&page_size=5` | +| page_size | integer | Number of results to return per page. | `/loadtests/v2/tests?project_id={project_id}&page=2&page_size=5` |
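For illustration, a sketch of calling this endpoint from a k6 script; the `Authorization: Token <API_TOKEN>` header format, the `https://api.k6.io` base URL, and the project ID are assumptions to adapt to your own setup. A successful call returns a response shaped like the JSON below:

```javascript
import http from 'k6/http';

// Placeholder values: substitute your own API token and project ID
const API_TOKEN = 'YOUR_API_AUTH_TOKEN';
const PROJECT_ID = 123;

export default function () {
  const res = http.get(`https://api.k6.io/loadtests/v2/tests?project_id=${PROJECT_ID}`, {
    headers: { Authorization: `Token ${API_TOKEN}` },
  });
  console.log(res.status);
}
```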
```json { - "k6-runs": [ ], - "k6-tests": [{ - "id": 0, - "project_id": 0, - "user_id": 0, - "name": "string", - "created": "2020-08-13T18:28:45Z", - "updated": "2020-08-13T18:28:45Z", - "last_test_run_id": "string", - "test_run_ids": [0], - "script": "string" - }], - "load_zones": [ ], - "meta": { - "count": 1 + "k6-runs": [], + "k6-tests": [ + { + "id": 0, + "project_id": 0, + "user_id": 0, + "name": "string", + "created": "2020-08-13T18:28:45Z", + "updated": "2020-08-13T18:28:45Z", + "last_test_run_id": "string", + "test_run_ids": [0], + "script": "string" } + ], + "load_zones": [], + "meta": { + "count": 1 + } } ``` @@ -52,27 +52,25 @@ Returns details of a test with the specified ID. **GET** `/loadtests/v2/tests/{id}` -| Path Parameter | Type | Description | -| ----------| ---- | ----------- | -| id | integer | A unique integer value identifying this test. | - - +| Path Parameter | Type | Description | +| -------------- | ------- | --------------------------------------------- | +| id | integer | A unique integer value identifying this test. |
```json { - "k6-test": { - "id": 0, - "project_id": 0, - "user_id": 0, - "name": "string", - "created": "2020-08-13T18:28:45Z", - "updated": "2020-08-13T18:28:45Z", - "last_test_run_id": "string", - "test_run_ids": [], - "script": "string" - } + "k6-test": { + "id": 0, + "project_id": 0, + "user_id": 0, + "name": "string", + "created": "2020-08-13T18:28:45Z", + "updated": "2020-08-13T18:28:45Z", + "last_test_run_id": "string", + "test_run_ids": [], + "script": "string" + } } ``` @@ -84,31 +82,30 @@ Partially updates a test in Load Impact. Note: only given fields will be updated **PATCH** `/loadtests/v2/tests/{id}` -| Path Parameter | Type | Description | -| ----------| ---- | ----------- | -| id | integer | A unique integer value identifying this test. | - -| Request Body Parameter | Type | Description | -| ----------| ---- | ----------- | -| name | string | Name of the test. | -| script | string | k6 script that will be used when starting test runs. | +| Path Parameter | Type | Description | +| -------------- | ------- | --------------------------------------------- | +| id | integer | A unique integer value identifying this test. | +| Request Body Parameter | Type | Description | +| ---------------------- | ------ | ---------------------------------------------------- | +| name | string | Name of the test. | +| script | string | k6 script that will be used when starting test runs. |
```json { - "k6-test": { - "id": 0, - "project_id": 0, - "user_id": 0, - "name": "string", - "created": "2020-08-13T18:28:45Z", - "updated": "2020-08-13T18:28:45Z", - "last_test_run_id": "string", - "test_run_ids": [0], - "script": "string" - } + "k6-test": { + "id": 0, + "project_id": 0, + "user_id": 0, + "name": "string", + "created": "2020-08-13T18:28:45Z", + "updated": "2020-08-13T18:28:45Z", + "last_test_run_id": "string", + "test_run_ids": [0], + "script": "string" + } } ``` @@ -120,8 +117,8 @@ Deletes a test with the specified test ID. **DELETE** `/loadtests/v2/tests/{id}` -| Path Parameter | Type | Description | -| ----------| ---- | ----------- | -| id | integer | A unique integer value identifying this test. | +| Path Parameter | Type | Description | +| -------------- | ------- | --------------------------------------------- | +| id | integer | A unique integer value identifying this test. | **RESPONSE** `204` diff --git a/src/data/markdown/docs/06 Cloud REST API/01 Cloud REST API/05 Test Runs.md b/src/data/markdown/docs/06 Cloud REST API/01 Cloud REST API/05 Test Runs.md index 069bee1e34..c074937aaf 100644 --- a/src/data/markdown/docs/06 Cloud REST API/01 Cloud REST API/05 Test Runs.md +++ b/src/data/markdown/docs/06 Cloud REST API/01 Cloud REST API/05 Test Runs.md @@ -10,40 +10,40 @@ Starts a test-run in the k6 cloud. It uses the specified test ID, previously ret **POST** `/loadtests/v2/tests/{id}/start-testrun` -| Path Parameter | Type | Description | -| ----------| ---- | ----------- | -| id | integer | A unique integer value identifying this test. | +| Path Parameter | Type | Description | +| -------------- | ------- | --------------------------------------------- | +| id | integer | A unique integer value identifying this test. |
```json { - "k6-run": { - "config": {}, - "created": "2020-08-13T18:28:45Z", - "duration": 0, - "ended": "2020-08-13T18:28:45Z", - "error_code": 0, - "error_detail": "string", - "id": 0, - "k6_archive": "string", - "load_time": 0, - "note": "string", - "organization_id": 0, - "processing_status": 0, - "project_id": 0, - "public_id": 0, - "result_status": 0, - "run_process": "string", - "run_status": 0, - "script": "string", - "started": "2020-08-13T18:28:45Z", - "stopped_by_id": 0, - "test_id": 0, - "user_id": 0, - "version": "string", - "vus": 0 - } + "k6-run": { + "config": {}, + "created": "2020-08-13T18:28:45Z", + "duration": 0, + "ended": "2020-08-13T18:28:45Z", + "error_code": 0, + "error_detail": "string", + "id": 0, + "k6_archive": "string", + "load_time": 0, + "note": "string", + "organization_id": 0, + "processing_status": 0, + "project_id": 0, + "public_id": 0, + "result_status": 0, + "run_process": "string", + "run_status": 0, + "script": "string", + "started": "2020-08-13T18:28:45Z", + "stopped_by_id": 0, + "test_id": 0, + "user_id": 0, + "version": "string", + "vus": 0 + } } ``` @@ -51,142 +51,141 @@ Starts a test-run in the k6 cloud. It uses the specified test ID, previously ret ## Read test run -Returns details of a test run with the specified ID. There are several fields in response that are helpful for checking test run's status: +Returns details of a test run with the specified ID. The response contains several fields that are helpful for checking the test run's status: `run_status` - Describes how far test run is in the execution pipeline. Possible values are: -| Value | Description | -| ----- | ---------- | -| -2 | CREATED - test run is created in our system. | -| -1 | VALIDATED - test run passed script and subscription validation. | -| 0 | QUEUED - test run is waiting for an empty slot in execution queue. | -| 1 | INITIALIZING - load generator instances are being allocated for the test run. | -| 2 | RUNNING - test run is currently executing. | -| 3 | FINISHED - test run has finished executing. | -| 4 | TIMED_OUT - test run has timed out (usually because data took too long to arrive/process). | -| 5 | ABORTED_BY_USER - test run was aborted by user. | -| 6 | ABORTED_BY_SYSTEM - test run was aborted by our system (usually because of some error). | -| 7 | ABORTED_BY_SCRIPT_ERROR - test run was aborted due to an error in the script. | -| 8 | ABORTED_BY_THRESHOLD - test run was aborted because a threshold defined in k6 script was reached. | -| 9 | ABORTED_BY_LIMIT - test run was aborted because of a limit in script definition or execution (e.g. subscription limitations). | +| Value | Description | +| ----- | ----------------------------------------------------------------------------------------------------------------------------- | +| -2 | CREATED - test run is created in our system. | +| -1 | VALIDATED - test run passed script and subscription validation. | +| 0 | QUEUED - test run is waiting for an empty slot in execution queue. | +| 1 | INITIALIZING - load generator instances are being allocated for the test run. | +| 2 | RUNNING - test run is currently executing. | +| 3 | FINISHED - test run has finished executing. | +| 4 | TIMED_OUT - test run has timed out (usually because data took too long to arrive/process). | +| 5 | ABORTED_BY_USER - test run was aborted by user. | +| 6 | ABORTED_BY_SYSTEM - test run was aborted by our system (usually because of some error). | +| 7 | ABORTED_BY_SCRIPT_ERROR - test run was aborted due to an error in the script. 
| +| 8 | ABORTED_BY_THRESHOLD - test run was aborted because a threshold defined in k6 script was reached. | +| 9 | ABORTED_BY_LIMIT - test run was aborted because of a limit in script definition or execution (e.g. subscription limitations). | `processing_status` - Describes if system is (still) processing metric data for the test run. Possible values are: -| Value | Description | -| ----- | ---------- | -| 0 | NOT_STARTED - data processing has not started yet. | -| 1 | PROCESSING - data processing is in progress. | -| 2 | FINISHED - data processing is finished and all metrics are available. | -| 3 | ERROR - there was an error in data processing and some or all metrics are not available. | - +| Value | Description | +| ----- | ---------------------------------------------------------------------------------------- | +| 0 | NOT_STARTED - data processing has not started yet. | +| 1 | PROCESSING - data processing is in progress. | +| 2 | FINISHED - data processing is finished and all metrics are available. | +| 3 | ERROR - there was an error in data processing and some or all metrics are not available. | `result_status` - Describes if the test has passed or failed. Possible values are: -| Value | Description | -| ----- | ---------- | -| 0 | PASSED - all criteria defined in k6 script have passed. | -| 1 | FAILED - on or more criteria in k6 script has failed. | +| Value | Description | +| ----- | ------------------------------------------------------- | +| 0 | PASSED - all criteria defined in k6 script have passed. | +| 1 | FAILED - on or more criteria in k6 script has failed. | **GET** `/loadtests/v2/runs/{id}` -| Path Parameter | Type | Description | -| ----------| ---- | ----------- | -| id | integer | ID of the test run. | +| Path Parameter | Type | Description | +| -------------- | ------- | ------------------- | +| id | integer | ID of the test run. | -| Query Parameter | Type | Description | Example | -| ----------| ---- | ----------- | ---------- | -| $select | string | Specify a subset of fields to return. | `/loadtests/v2/runs/1?$select=id,test_id,run_status,created` | +| Query Parameter | Type | Description | Example | +| --------------- | ------ | ------------------------------------- | ------------------------------------------------------------ | +| \$select | string | Specify a subset of fields to return. | `/loadtests/v2/runs/1?$select=id,test_id,run_status,created` |
```json { - "k6-run": { - "config": {}, - "created": "2020-08-18T13:33:41", - "duration": 120, - "ended": "2020-08-18T13:36:02", - "error_code": null, - "error_detail": null, - "export": null, - "id": 0, - "is_baseline": false, - "k6_archive": "string", - "load_time": 75.0, - "note": "", - "organization_id": 0, - "processing_status": 2, - "project_id": 0, - "public_id": null, - "request_builder_config": null, - "result_status": 1, - "run_process": "k6 to Cloud", - "run_status": 3, - "script": "", - "started": "2020-08-18T13:34:04", - "stopped_by_id": 0, - "test_id": 0, - "user_id": 0, - "version": 2, - "vus": 200, - "vus_per_instance": 0 - } + "k6-run": { + "config": {}, + "created": "2020-08-18T13:33:41", + "duration": 120, + "ended": "2020-08-18T13:36:02", + "error_code": null, + "error_detail": null, + "export": null, + "id": 0, + "is_baseline": false, + "k6_archive": "string", + "load_time": 75.0, + "note": "", + "organization_id": 0, + "processing_status": 2, + "project_id": 0, + "public_id": null, + "request_builder_config": null, + "result_status": 1, + "run_process": "k6 to Cloud", + "run_status": 3, + "script": "", + "started": "2020-08-18T13:34:04", + "stopped_by_id": 0, + "test_id": 0, + "user_id": 0, + "version": 2, + "vus": 200, + "vus_per_instance": 0 + } } ```
- ## List test runs Returns test runs for a particular test. **GET** `/loadtests/v2/runs?test_id={test_id}` -| Query Parameter | Type | Description | Example | -| ----------| ---- | ----------- | ---------- | -| test_id | integer | ID of the test. | `/loadtests/v2/runs?$test_id=1` | -| $select | string | Specify a subset of fields to return. | `/loadtests/v2/runs?$test_id=1&select=id,test_id,run_status,created` | -| ids[] | integer | Specify a subset test runs to return. | `/loadtests/v2/runs?$test_id=1&ids[]=1&ids[]=3` | -| public_id | string | Get a test run by public_id. | `/loadtests/v2/runs?public_id={public_id}` | - +| Query Parameter | Type | Description | Example | +| --------------- | ------- | ------------------------------------- | -------------------------------------------------------------------- | +| test_id | integer | ID of the test. | `/loadtests/v2/runs?$test_id=1` | +| \$select | string | Specify a subset of fields to return. | `/loadtests/v2/runs?$test_id=1&select=id,test_id,run_status,created` | +| ids[] | integer | Specify a subset test runs to return. | `/loadtests/v2/runs?$test_id=1&ids[]=1&ids[]=3` | +| public_id | string | Get a test run by public_id. | `/loadtests/v2/runs?public_id={public_id}` |
```json { - "k6-runs": [{ - "config": {}, - "created": "2020-08-18T13:33:41", - "duration": 120, - "ended": "2020-08-18T13:36:02", - "error_code": null, - "error_detail": null, - "id": 0, - "is_baseline": false, - "k6_archive": "string", - "load_time": 75.0, - "note": "", - "organization_id": 0, - "processing_status": 2, - "project_id": 0, - "public_id": null, - "request_builder_config": null, - "result_status": 1, - "run_process": "k6 to Cloud", - "run_status": 3, - "script": "", - "started": "2020-08-18T13:34:04", - "stopped_by_id": 0, - "test_id": 0, - "user_id": 0, - "version": 2, - "vus": 200, - "vus_per_instance": 0 - }], - "meta": { - "count": 1 + "k6-runs": [ + { + "config": {}, + "created": "2020-08-18T13:33:41", + "duration": 120, + "ended": "2020-08-18T13:36:02", + "error_code": null, + "error_detail": null, + "id": 0, + "is_baseline": false, + "k6_archive": "string", + "load_time": 75.0, + "note": "", + "organization_id": 0, + "processing_status": 2, + "project_id": 0, + "public_id": null, + "request_builder_config": null, + "result_status": 1, + "run_process": "k6 to Cloud", + "run_status": 3, + "script": "", + "started": "2020-08-18T13:34:04", + "stopped_by_id": 0, + "test_id": 0, + "user_id": 0, + "version": 2, + "vus": 200, + "vus_per_instance": 0 } + ], + "meta": { + "count": 1 + } } ``` diff --git a/src/data/markdown/docs/06 Cloud REST API/01 Cloud REST API/06 Test Run Metrics.md b/src/data/markdown/docs/06 Cloud REST API/01 Cloud REST API/06 Test Run Metrics.md index 686e0558a8..863661d506 100644 --- a/src/data/markdown/docs/06 Cloud REST API/01 Cloud REST API/06 Test Run Metrics.md +++ b/src/data/markdown/docs/06 Cloud REST API/01 Cloud REST API/06 Test Run Metrics.md @@ -6,21 +6,22 @@ draft: 'true' ## List metrics -Returns all metrics within a specified test run. Test run ids can be found from `test_run_ids` field in response from Read Test or from `id` field in response from Start Test Run endpoint. +Returns all metrics within a specified test run. The test run ids are available in the `test_run_ids` field in response from Read Test or from `id` field in response from Start Test Run endpoint. Note that k6 cloud may store multiple (sub)metrics for each metric generated in the k6 script. For example, when sending an HTTP request within the script, k6 cloud will store `http_req_duration`, `http_req_blocked`, `http_req_connecting` and other metrics for that particular endpoint. -Also, separate metrics will be created for different HTTP methods and status codes (e.g. same url will produce multiple metrics if k6 detects statuses such as 200, 400 etc). +Differing HTTP methods and statuses are all grouped as separate metrics. (e.g. same URL produces multiple metrics if k6 detects statuses such as 200, 400 etc). -Some of the fields contained in response are described here: -* `check_id` - ID of the `check` defined in `k6` script. Checks have their underlying metrics stored in k6 cloud. -* `group_id` - ID of the `group` this metric belongs to. -* `contains` - "Unit" for the metrics. Some examples are: `time`, `percent`, `bytes` et. +Some of the fields contained in the response are: + +- `check_id` - ID of the `check` defined in `k6` script. Checks have their underlying metrics stored in k6 cloud. +- `group_id` - ID of the `group` this metric belongs to. +- `contains` - "Unit" for the metrics. Some examples are: `time`, `percent`, `bytes` et. 
**GET** `/loadtests/v2/metrics?test_run_id={test_run_id}` -| Query Parameter | Type | Description | -| ----------| ---- | ----------- | -| test_run_id | integer | Returns metrics associated with a given test_run_id. | +| Query Parameter | Type | Description | +| --------------- | ------- | ---------------------------------------------------- | +| test_run_id | integer | Returns metrics associated with a given test_run_id. |
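A short request sketch may again help; the same assumptions apply as in the List test runs example (Node 18+ ES module, `https://api.k6.io`, `Token` auth). The `k6-metrics` response key is inferred from the thresholds example later on this page rather than stated here, so treat it as an assumption too.

```javascript
// Sketch only: list every (sub)metric stored for one test run.
const API_TOKEN = process.env.K6_CLOUD_API_TOKEN; // hypothetical variable name
const TEST_RUN_ID = 103054;

const res = await fetch(
  `https://api.k6.io/loadtests/v2/metrics?test_run_id=${TEST_RUN_ID}`,
  { headers: { Authorization: `Token ${API_TOKEN}` } },
);
const { 'k6-metrics': metrics } = await res.json(); // assumed response key

for (const m of metrics) {
  // e.g. "http_req_duration (trend, contains=time) { method: 'GET', status: '200', url: '...' }"
  console.log(`${m.name} (${m.type}, contains=${m.contains})`, m.tags);
}
```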
@@ -35,9 +36,9 @@ Some of the fields contained in response are described here: "name": "http_req_duration", "project_id": 123, "tags": { - "method": "GET", - "status": "200", - "url": "https://test.k6.io" + "method": "GET", + "status": "200", + "url": "https://test.k6.io" }, "test_run_id": 103054, "type": "trend", @@ -49,24 +50,24 @@ Some of the fields contained in response are described here:
- ## Read metric Returns details of a metric with the specified ID. **GET** `/loadtests/v2/metrics/{id}` +| Path Parameter | Type | Description | +| -------------- | ------ | --------------------------- | +| id | string | Returns the metric with the given id. | -| Path Parameter | Type | Description | -| ----------| ---- | ----------- | -| id | string | Return metric given the id. | - + -| Query Parameters | Type | Description | Example | -| ----------| ---- | ----------- | ---------- | -| test_run_id | integer | Returns metric associated with a given test_run_id. | `/loadtests/v2/metrics/{metric_id}?test_run_id={test_run_id}` | -| include[] | string | Specifies additional information to be included in response. Allowed options: url, group, check. | `/loadtests/v2/metrics/{metric_id}?test_run_id={test_run_id}&include[]=check&include[]=group` | +| Query Parameters | Type | Description | Example | +| ---------------- | ------- | ------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------- | +| test_run_id | integer | Returns the metric associated with a given test_run_id. | `/loadtests/v2/metrics/{metric_id}?test_run_id={test_run_id}` | +| include[] | string | Specifies additional information to include in the response. Allowed options: url, group, check. | `/loadtests/v2/metrics/{metric_id}?test_run_id={test_run_id}&include[]=check&include[]=group` | +
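The `include[]` parameter is easiest to see in a request. The sketch below reuses the assumptions from the earlier examples (base URL, token auth, environment variable name), and the metric id is a placeholder taken from a List metrics response.

```javascript
// Sketch only: read one metric and pull in its related check and group.
const API_TOKEN = process.env.K6_CLOUD_API_TOKEN; // hypothetical variable name
const TEST_RUN_ID = 103054;
const METRIC_ID = 'metric_id_placeholder'; // from the List metrics response

const url =
  `https://api.k6.io/loadtests/v2/metrics/${METRIC_ID}` +
  `?test_run_id=${TEST_RUN_ID}&include[]=check&include[]=group`;

const res = await fetch(url, { headers: { Authorization: `Token ${API_TOKEN}` } });
console.log(await res.json());
```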
@@ -103,19 +104,19 @@ Returns details of a metric with the specified ID.
- ## Read series data -Returns timeseries data for specified metrics ids within a specified test run id. -Test run ids can be found from `test_run_ids` field in response from Read Test or from `id` field in response from Start Test Run endpoint. Metric ids can be found from `id` field in response from List metrics endpoint. +Returns time series data for specified metric ids within a specified test run id. Test run ids are +available in the `test_run_ids` field of the response from `Read Test` or in the `id` field of the response +from the `Start Test Run` endpoint. Metric ids are available in the `id` field of the response from the +List metrics endpoint. **GET** `/loadtests/v2/series?test_run_id={test_run_id}&ids[]={metric_id_1}` -| Query Parameters | Type | Description | Example | -| ----------| ---- | ----------- | ---------- | -| test_run_id | integer | Specify test run id. | | -| ids[] | string | Specify metric id(s). | `/loadtests/v2/series?test_run_id={test_run_id}&ids[]={metric_id_1}&ids[]={metric_id_2}` | - +| Query Parameters | Type | Description | Example | +| ---------------- | ------- | --------------------- | ---------------------------------------------------------------------------------------- | +| test_run_id | integer | Specify test run id. | | +| ids[] | string | Specify metric id(s). | `/loadtests/v2/series?test_run_id={test_run_id}&ids[]={metric_id_1}&ids[]={metric_id_2}` |
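Because several metric ids are usually requested at once, the repeated `ids[]` parameter is the part most worth illustrating. Same assumptions as the sketches above; the metric ids are placeholders.

```javascript
// Sketch only: fetch time series for two metrics of one test run.
const API_TOKEN = process.env.K6_CLOUD_API_TOKEN; // hypothetical variable name
const TEST_RUN_ID = 103054;
const METRIC_IDS = ['metric_id_1', 'metric_id_2']; // placeholders from List metrics

// Build the query string by hand so each metric id becomes its own ids[] entry.
const query = [
  `test_run_id=${TEST_RUN_ID}`,
  ...METRIC_IDS.map((id) => `ids[]=${encodeURIComponent(id)}`),
].join('&');

const res = await fetch(`https://api.k6.io/loadtests/v2/series?${query}`, {
  headers: { Authorization: `Token ${API_TOKEN}` },
});
console.log(await res.json());
```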
@@ -141,93 +142,91 @@ Test run ids can be found from `test_run_ids` field in response from Read Test o
- ## List thresholds Returns all thresholds (and related metrics) for a test run. **GET** `/loadtests/v2/thresholds?test_run_id={test_run_id}&ids[]={threshold_id_1}` -| Query Parameter | Type | Description | Example | -| ----------| ---- | ----------- | ---------- | -| test_run_id | integer | Returns thresholds associated with a given test_run_id. | | -| ids[] | integer | Specify threshold id(s). | `/loadtests/v2/thresholds?test_run_id={test_run_id}&ids[]={threshold_id_1}&ids[]={threshold_id_2}` | +| Query Parameter | Type | Description | Example | +| --------------- | ------- | ------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | +| test_run_id | integer | Returns thresholds associated with a given test_run_id. | | +| ids[] | integer | Specify threshold id(s). | `/loadtests/v2/thresholds?test_run_id={test_run_id}&ids[]={threshold_id_1}&ids[]={threshold_id_2}` |
```json { - "k6-metrics": [ - { - "check_id": null, - "contains": "default", - "group_id": "3cb67e24358b7c3f1256ff423381c05b", - "id": "threshold_166100", - "name": "iterations", - "project_id": 3458575, - "tags": {}, - "test_run_id": 0, - "type": "counter", - "url_id": null - } - ], - "k6-thresholds": [ - { - "calc_state": { - "max_created_at": "2020-08-18 13:36:41.348933+00:00", - "max_time": "2020-08-18 13:36:04+00:00", - "min_time": "2020-08-18 13:34:05+00:00", - "tainted_value": null - }, - "calculated_value": 22024.0, - "id": 166100, - "metric_id": "threshold_166100", - "name": "threshold_1", - "stat": "count", - "tainted": false, - "tainted_at": null, - "testrun_id": 0, - "value": 200.0 - } - ] + "k6-metrics": [ + { + "check_id": null, + "contains": "default", + "group_id": "3cb67e24358b7c3f1256ff423381c05b", + "id": "threshold_166100", + "name": "iterations", + "project_id": 3458575, + "tags": {}, + "test_run_id": 0, + "type": "counter", + "url_id": null + } + ], + "k6-thresholds": [ + { + "calc_state": { + "max_created_at": "2020-08-18 13:36:41.348933+00:00", + "max_time": "2020-08-18 13:36:04+00:00", + "min_time": "2020-08-18 13:34:05+00:00", + "tainted_value": null + }, + "calculated_value": 22024.0, + "id": 166100, + "metric_id": "threshold_166100", + "name": "threshold_1", + "stat": "count", + "tainted": false, + "tainted_at": null, + "testrun_id": 0, + "value": 200.0 + } + ] } ```
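If you only need a pass/fail signal per threshold, the response above already contains it. The sketch below assumes that `tainted: true` marks a threshold that was crossed, which matches how k6 uses the term elsewhere but is not spelled out on this page.

```javascript
// Sketch only: summarize thresholds from a /loadtests/v2/thresholds response.
// `thresholdsResponse` stands for the parsed JSON body shown above.
const thresholdsResponse = {
  'k6-thresholds': [
    { name: 'threshold_1', stat: 'count', calculated_value: 22024.0, value: 200.0, tainted: false },
  ],
};

for (const t of thresholdsResponse['k6-thresholds']) {
  const status = t.tainted ? 'crossed' : 'ok'; // assumption: tainted === crossed
  console.log(`${t.name}: ${t.stat}=${t.calculated_value} (limit ${t.value}) -> ${status}`);
}
```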
- ## Read threshold Returns details of a threshold with the specified ID. **GET** `/loadtests/v2/thresholds/{id}?test_run_id={test_run_id}` -| Query Parameter | Type | Description | -| ----------| ---- | ----------- | ---------- | -| test_run_id | integer | ID of the test run. | -| id | integer | ID of the threshold. | +| Query Parameter | Type | Description | +| --------------- | ------- | -------------------- | +| test_run_id | integer | ID of the test run. | +| id | integer | ID of the threshold. |
```json { - "k6-threshold": { - "calc_state": { - "max_created_at": "2020-08-18 13:36:41.348933+00:00", - "max_time": "2020-08-18 13:36:04+00:00", - "min_time": "2020-08-18 13:34:05+00:00", - "tainted_value": null - }, - "calculated_value": 22024.0, - "id": 166100, - "metric_id": "threshold_166100", - "name": "threshold_1", - "stat": "count", - "tainted": false, - "tainted_at": null, - "test_run_id": 0, - "value": 200.0 - } + "k6-threshold": { + "calc_state": { + "max_created_at": "2020-08-18 13:36:41.348933+00:00", + "max_time": "2020-08-18 13:36:04+00:00", + "min_time": "2020-08-18 13:34:05+00:00", + "tainted_value": null + }, + "calculated_value": 22024.0, + "id": 166100, + "metric_id": "threshold_166100", + "name": "threshold_1", + "stat": "count", + "tainted": false, + "tainted_at": null, + "test_run_id": 0, + "value": 200.0 + } } ``` @@ -235,36 +234,35 @@ Returns details of a threshold with the specified ID. ## Read test run overview -Returns an overview of the test run whcih includes numbers of URLs, thresholds, checks, etc. +Returns an overview of the test run which includes numbers of URLs, thresholds, checks, etc. **GET** `/loadtests/v2/run-overviews?test_run_id={test_run_id}` -| Query Parameter | Type | Description | -| ----------| ---- | ----------- | -| test_run_id | integer | A unique integer value identifying this test run. | - +| Query Parameter | Type | Description | +| --------------- | ------- | ------------------------------------------------- | +| test_run_id | integer | A unique integer value identifying this test run. |
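The overview response (shown below) lends itself to quick summary calculations. Here is a sketch under the same assumptions as the earlier request examples; the ratios it prints are derived from the field names in the response, not from anything this page prescribes.

```javascript
// Sketch only: fetch the run overview and derive a few summary numbers.
const API_TOKEN = process.env.K6_CLOUD_API_TOKEN; // hypothetical variable name
const TEST_RUN_ID = 103054;

const res = await fetch(
  `https://api.k6.io/loadtests/v2/run-overviews?test_run_id=${TEST_RUN_ID}`,
  { headers: { Authorization: `Token ${API_TOKEN}` } },
);
const [overview] = (await res.json())['k6-run-overviews'];

console.log('avg http_req_duration (ms):', overview.http_req_duration_avg);
console.log('check hit success rate:', overview.checks_hits_successes / overview.checks_hits_total);
console.log(`thresholds passed: ${overview.thresholds_successes}/${overview.thresholds_total}`);
```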
```json { - "k6-run-overviews": [ - { - "checks_hits_successes": 44048, - "checks_hits_total": 44048, - "checks_successes": 2, - "checks_total": 2, - "http_req_duration_avg": 21.3739818525472, - "http_reqs_avg": 716.2276422764228, - "test_run_id": 0, - "thresholds_successes": 2, - "thresholds_total": 3, - "urls_hits_successes": 88096, - "urls_hits_total": 88096, - "urls_successes": 4, - "urls_total": 4 - } - ] + "k6-run-overviews": [ + { + "checks_hits_successes": 44048, + "checks_hits_total": 44048, + "checks_successes": 2, + "checks_total": 2, + "http_req_duration_avg": 21.3739818525472, + "http_reqs_avg": 716.2276422764228, + "test_run_id": 0, + "thresholds_successes": 2, + "thresholds_total": 3, + "urls_hits_successes": 88096, + "urls_hits_total": 88096, + "urls_successes": 4, + "urls_total": 4 + } + ] } ``` @@ -276,21 +274,20 @@ Exports metric data for test run in CSV format. URL to the file is available in **POST** `/loadtests/v2/runs/{test_run_id}/export` -| Path Parameter | Type | Description | -| ----------| ---- | ----------- | -| test_run_id | integer | A unique integer value identifying this test run. | - +| Path Parameter | Type | Description | +| -------------- | ------- | ------------------------------------------------- | +| test_run_id | integer | A unique integer value identifying this test run. |
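Since this endpoint uses POST and the export is produced asynchronously, a request sketch is included below, under the same assumptions as the earlier examples. The section intro notes where the URL of the finished file eventually appears, so poll that resource after triggering the export; the meaning of each `export_status` value is not defined on this page.

```javascript
// Sketch only: trigger a CSV export for a finished test run.
const API_TOKEN = process.env.K6_CLOUD_API_TOKEN; // hypothetical variable name
const TEST_RUN_ID = 103054;

const res = await fetch(`https://api.k6.io/loadtests/v2/runs/${TEST_RUN_ID}/export`, {
  method: 'POST',
  headers: { Authorization: `Token ${API_TOKEN}` },
});
const body = await res.json();

// The export runs asynchronously; export_status reflects its progress.
console.log('export status:', body.exports[0].export_status);
```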
```json { - "exports": [ - { - "export_status": 1, - "load_test_run_id": 0 - } - ] + "exports": [ + { + "export_status": 1, + "load_test_run_id": 0 + } + ] } ``` diff --git a/src/templates/docs/integrations.js b/src/templates/docs/integrations.js index 5b26d3d320..c931ed761c 100644 --- a/src/templates/docs/integrations.js +++ b/src/templates/docs/integrations.js @@ -55,7 +55,7 @@ const iconsDataSet1 = [ }, { Icon: Datadog, - name: 'DataDog', + name: 'Datadog', to: '/results-visualization/datadog', }, {