diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000000..e69de29bb2 diff --git a/404.html b/404.html new file mode 100644 index 0000000000..05b8f78f85 --- /dev/null +++ b/404.html @@ -0,0 +1,1165 @@ + + + + + + + + + + + + + + + + + + + Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 0000000000..1cf13b9f9d Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/javascripts/bundle.081f42fc.min.js b/assets/javascripts/bundle.081f42fc.min.js new file mode 100644 index 0000000000..32734cd370 --- /dev/null +++ b/assets/javascripts/bundle.081f42fc.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Fi=Object.create;var gr=Object.defineProperty;var ji=Object.getOwnPropertyDescriptor;var Wi=Object.getOwnPropertyNames,Dt=Object.getOwnPropertySymbols,Ui=Object.getPrototypeOf,xr=Object.prototype.hasOwnProperty,no=Object.prototype.propertyIsEnumerable;var oo=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,R=(e,t)=>{for(var r in t||(t={}))xr.call(t,r)&&oo(e,r,t[r]);if(Dt)for(var r of Dt(t))no.call(t,r)&&oo(e,r,t[r]);return e};var io=(e,t)=>{var r={};for(var o in e)xr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Dt)for(var o of Dt(e))t.indexOf(o)<0&&no.call(e,o)&&(r[o]=e[o]);return r};var yr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Di=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Wi(t))!xr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=ji(t,n))||o.enumerable});return e};var Vt=(e,t,r)=>(r=e!=null?Fi(Ui(e)):{},Di(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var ao=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var co=yr((Er,so)=>{(function(e,t){typeof Er=="object"&&typeof so!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var 
o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(H){return!!(H&&H!==document&&H.nodeName!=="HTML"&&H.nodeName!=="BODY"&&"classList"in H&&"contains"in H.classList)}function p(H){var mt=H.type,ze=H.tagName;return!!(ze==="INPUT"&&a[mt]&&!H.readOnly||ze==="TEXTAREA"&&!H.readOnly||H.isContentEditable)}function c(H){H.classList.contains("focus-visible")||(H.classList.add("focus-visible"),H.setAttribute("data-focus-visible-added",""))}function l(H){H.hasAttribute("data-focus-visible-added")&&(H.classList.remove("focus-visible"),H.removeAttribute("data-focus-visible-added"))}function f(H){H.metaKey||H.altKey||H.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(H){o=!1}function h(H){s(H.target)&&(o||p(H.target))&&c(H.target)}function w(H){s(H.target)&&(H.target.classList.contains("focus-visible")||H.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(H.target))}function A(H){document.visibilityState==="hidden"&&(n&&(o=!0),te())}function te(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function ie(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function 
J(H){H.target.nodeName&&H.target.nodeName.toLowerCase()==="html"||(o=!1,ie())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",A,!0),te(),r.addEventListener("focus",h,!0),r.addEventListener("blur",w,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var Yr=yr((Rt,Kr)=>{/*! + * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Rt=="object"&&typeof Kr=="object"?Kr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Rt=="object"?Rt.ClipboardJS=r():t.ClipboardJS=r()})(Rt,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Ii}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(_){return!1}}var h=function(_){var O=f()(_);return u("cut"),O},w=h;function A(V){var _=document.documentElement.getAttribute("dir")==="rtl",O=document.createElement("textarea");O.style.fontSize="12pt",O.style.border="0",O.style.padding="0",O.style.margin="0",O.style.position="absolute",O.style[_?"right":"left"]="-9999px";var j=window.pageYOffset||document.documentElement.scrollTop;return O.style.top="".concat(j,"px"),O.setAttribute("readonly",""),O.value=V,O}var te=function(_,O){var 
j=A(_);O.container.appendChild(j);var D=f()(j);return u("copy"),j.remove(),D},ie=function(_){var O=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},j="";return typeof _=="string"?j=te(_,O):_ instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(_==null?void 0:_.type)?j=te(_.value,O):(j=f()(_),u("copy")),j},J=ie;function H(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?H=function(O){return typeof O}:H=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},H(V)}var mt=function(){var _=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},O=_.action,j=O===void 0?"copy":O,D=_.container,Y=_.target,ke=_.text;if(j!=="copy"&&j!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&H(Y)==="object"&&Y.nodeType===1){if(j==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(j==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(ke)return J(ke,{container:D});if(Y)return j==="cut"?w(Y):J(Y,{container:D})},ze=mt;function Ie(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Ie=function(O){return typeof O}:Ie=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},Ie(V)}function _i(V,_){if(!(V instanceof _))throw new TypeError("Cannot call a class as a function")}function ro(V,_){for(var O=0;O<_.length;O++){var j=_[O];j.enumerable=j.enumerable||!1,j.configurable=!0,"value"in j&&(j.writable=!0),Object.defineProperty(V,j.key,j)}}function Ai(V,_,O){return _&&ro(V.prototype,_),O&&ro(V,O),V}function Ci(V,_){if(typeof _!="function"&&_!==null)throw new TypeError("Super expression must either be null or a function");V.prototype=Object.create(_&&_.prototype,{constructor:{value:V,writable:!0,configurable:!0}}),_&&br(V,_)}function br(V,_){return br=Object.setPrototypeOf||function(j,D){return j.__proto__=D,j},br(V,_)}function Hi(V){var _=Pi();return function(){var j=Wt(V),D;if(_){var Y=Wt(this).constructor;D=Reflect.construct(j,arguments,Y)}else D=j.apply(this,arguments);return ki(this,D)}}function ki(V,_){return _&&(Ie(_)==="object"||typeof _=="function")?_:$i(V)}function $i(V){if(V===void 0)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return V}function Pi(){if(typeof Reflect=="undefined"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(V){return!1}}function Wt(V){return Wt=Object.setPrototypeOf?Object.getPrototypeOf:function(O){return O.__proto__||Object.getPrototypeOf(O)},Wt(V)}function vr(V,_){var O="data-clipboard-".concat(V);if(_.hasAttribute(O))return _.getAttribute(O)}var 
Ri=function(V){Ci(O,V);var _=Hi(O);function O(j,D){var Y;return _i(this,O),Y=_.call(this),Y.resolveOptions(D),Y.listenClick(j),Y}return Ai(O,[{key:"resolveOptions",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=Ie(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function(ke){return Y.onClick(ke)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,ke=this.action(Y)||"copy",Ut=ze({action:ke,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Ut?"success":"error",{action:ke,text:Ut,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(D,Y)}},{key:"cut",value:function(D){return w(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof D=="string"?[D]:D,ke=!!document.queryCommandSupported;return Y.forEach(function(Ut){ke=ke&&!!document.queryCommandSupported(Ut)}),ke}}]),O}(s()),Ii=Ri},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof 
s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,h,w){var A=c.apply(this,arguments);return l.addEventListener(u,A,w),{destroy:function(){l.removeEventListener(u,A,w)}}}function p(l,f,u,h,w){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(A){return s(A,f,u,h,w)}))}function c(l,f,u,h){return function(w){w.delegateTarget=a(w.target,f),w.delegateTarget&&h.call(l,w)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,h,w){if(!u&&!h&&!w)throw new Error("Missing required arguments");if(!a.string(h))throw new TypeError("Second argument must be a String");if(!a.fn(w))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,h,w);if(a.nodeList(u))return l(u,h,w);if(a.string(u))return f(u,h,w);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,h,w){return u.addEventListener(h,w),{destroy:function(){u.removeEventListener(h,w)}}}function l(u,h,w){return Array.prototype.forEach.call(u,function(A){A.addEventListener(h,w)}),{destroy:function(){Array.prototype.forEach.call(u,function(A){A.removeEventListener(h,w)})}}}function f(u,h,w){return s(document.body,u,h,w)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else 
if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p{"use strict";/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var ts=/["'&<>]/;ei.exports=rs;function rs(e){var t=""+e,r=ts.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||s(u,h)})})}function s(u,h){try{p(o[u](h))}catch(w){f(i[0][3],w)}}function p(u){u.value instanceof nt?Promise.resolve(u.value.v).then(c,l):f(i[0][2],u)}function c(u){s("next",u)}function l(u){s("throw",u)}function f(u,h){u(h),i.shift(),i.length&&s(i[0][0],i[0][1])}}function mo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not 
defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof de=="function"?de(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function k(e){return typeof e=="function"}function ft(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ft(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + `):"",this.name="UnsubscriptionError",this.errors=r}});function qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Fe=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=de(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(A){t={error:A}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(A){i=A instanceof zt?A.errors:[A]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=de(f),h=u.next();!h.done;h=u.next()){var w=h.value;try{fo(w)}catch(A){i=i!=null?i:[],A instanceof zt?i=q(q([],N(i)),N(A.errors)):i.push(A)}}}catch(A){o={error:A}}finally{try{h&&!h.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)fo(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 
0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=Fe.EMPTY;function qt(e){return e instanceof Fe||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function fo(e){k(e)?e():e.unsubscribe()}var $e={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var ut={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new Fe(function(){o.currentObservers=null,qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,o){return new Eo(r,o)},t}(F);var Eo=function(e){re(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var 
r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){re(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var Lt={now:function(){return(Lt.delegate||Date).now()},delegate:void 0};var _t=function(e){re(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=Lt);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(vt);var So=function(e){re(t,e);function t(){return 
e!==null&&e.apply(this,arguments)||this}return t}(gt);var Hr=new So(To);var Oo=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=bt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(bt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(vt);var Mo=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(gt);var me=new Mo(Oo);var M=new F(function(e){return e.complete()});function Yt(e){return e&&k(e.schedule)}function kr(e){return e[e.length-1]}function Xe(e){return k(kr(e))?e.pop():void 0}function He(e){return Yt(kr(e))?e.pop():void 0}function Bt(e,t){return typeof kr(e)=="number"?e.pop():t}var xt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return k(e==null?void 0:e.then)}function Jt(e){return k(e[ht])}function Xt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Gi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Gi();function tr(e){return k(e==null?void 0:e[er])}function rr(e){return lo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return k(e==null?void 0:e.getReader)}function W(e){if(e instanceof F)return e;if(e!=null){if(Jt(e))return Ji(e);if(xt(e))return Xi(e);if(Gt(e))return Zi(e);if(Xt(e))return Lo(e);if(tr(e))return ea(e);if(or(e))return ta(e)}throw Zt(e)}function Ji(e){return new F(function(t){var r=e[ht]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Xi(e){return new F(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?v(function(n,i){return e(n,i,o)}):le,Te(1),r?Be(t):zo(function(){return new ir}))}}function Fr(e){return e<=0?function(){return M}:y(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,h=0,w=!1,A=!1,te=function(){f==null||f.unsubscribe(),f=void 0},ie=function(){te(),l=u=void 0,w=A=!1},J=function(){var H=l;ie(),H==null||H.unsubscribe()};return y(function(H,mt){h++,!A&&!w&&te();var ze=u=u!=null?u:r();mt.add(function(){h--,h===0&&!A&&!w&&(f=Wr(J,p))}),ze.subscribe(mt),!l&&h>0&&(l=new at({next:function(Ie){return 
ze.next(Ie)},error:function(Ie){A=!0,te(),f=Wr(ie,n,Ie),ze.error(Ie)},complete:function(){w=!0,te(),f=Wr(ie,a),ze.complete()}}),W(H).subscribe(l))})(c)}}function Wr(e,t){for(var r=[],o=2;oe.next(document)),e}function $(e,t=document){return Array.from(t.querySelectorAll(e))}function P(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Re(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var xa=S(d(document.body,"focusin"),d(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Re()||document.body),B(1));function et(e){return xa.pipe(m(t=>e.contains(t)),K())}function kt(e,t){return C(()=>S(d(e,"mouseenter").pipe(m(()=>!0)),d(e,"mouseleave").pipe(m(()=>!1))).pipe(t?Ht(r=>Me(+!r*t)):le,Q(e.matches(":hover"))))}function Bo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Bo(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Bo(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function wt(e){let t=x("script",{src:e});return C(()=>(document.head.appendChild(t),S(d(t,"load"),d(t,"error").pipe(b(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),L(()=>document.head.removeChild(t)),Te(1))))}var Go=new g,ya=C(()=>typeof ResizeObserver=="undefined"?wt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Go.next(t)))),b(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),B(1));function 
ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return ya.pipe(E(r=>r.observe(t)),b(r=>Go.pipe(v(o=>o.target===t),L(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function Tt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Jo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ue(e){return{x:e.offsetLeft,y:e.offsetTop}}function Xo(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function Zo(e){return S(d(window,"load"),d(window,"resize")).pipe(Le(0,me),m(()=>Ue(e)),Q(Ue(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function De(e){return S(d(e,"scroll"),d(window,"scroll"),d(window,"resize")).pipe(Le(0,me),m(()=>pr(e)),Q(pr(e)))}var en=new g,Ea=C(()=>I(new IntersectionObserver(e=>{for(let t of e)en.next(t)},{threshold:0}))).pipe(b(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),B(1));function tt(e){return Ea.pipe(E(t=>t.observe(e)),b(t=>en.pipe(v(({target:r})=>r===e),L(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function tn(e,t=16){return De(e).pipe(m(({y:r})=>{let o=ce(e),n=Tt(e);return r>=n.height-o.height-t}),K())}var lr={drawer:P("[data-md-toggle=drawer]"),search:P("[data-md-toggle=search]")};function rn(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function Ve(e){let t=lr[e];return d(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function wa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ta(){return 
S(d(window,"compositionstart").pipe(m(()=>!0)),d(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function on(){let e=d(window,"keydown").pipe(v(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:rn("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),v(({mode:t,type:r})=>{if(t==="global"){let o=Re();if(typeof o!="undefined")return!wa(o,r)}return!0}),pe());return Ta().pipe(b(t=>t?M:e))}function xe(){return new URL(location.href)}function pt(e,t=!1){if(G("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function nn(){return new g}function an(){return location.hash.slice(1)}function sn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Sa(e){return S(d(window,"hashchange"),e).pipe(m(an),Q(an()),v(t=>t.length>0),B(1))}function cn(e){return Sa(e).pipe(m(t=>fe(`[id="${t}"]`)),v(t=>typeof t!="undefined"))}function $t(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function pn(){let e=matchMedia("print");return S(d(window,"beforeprint").pipe(m(()=>!0)),d(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(b(r=>r?t():M))}function zr(e,t){return new F(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function Ne(e,t){return zr(e,t).pipe(b(r=>r.text()),m(r=>JSON.parse(r)),B(1))}function 
ln(e,t){let r=new DOMParser;return zr(e,t).pipe(b(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),B(1))}function mn(e,t){let r=new DOMParser;return zr(e,t).pipe(b(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),B(1))}function fn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function un(){return S(d(window,"scroll",{passive:!0}),d(window,"resize",{passive:!0})).pipe(m(fn),Q(fn()))}function dn(){return{width:innerWidth,height:innerHeight}}function hn(){return d(window,"resize",{passive:!0}).pipe(m(dn),Q(dn()))}function bn(){return z([un(),hn()]).pipe(m(([e,t])=>({offset:e,size:t})),B(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(Z("size")),n=z([o,r]).pipe(m(()=>Ue(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function Oa(e){return d(e,"message",t=>t.data)}function Ma(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function vn(e,t=new Worker(e)){let r=Oa(t),o=Ma(t),n=new g;n.subscribe(o);let i=o.pipe(X(),ne(!0));return n.pipe(X(),Pe(r.pipe(U(i))),pe())}var La=P("#__config"),St=JSON.parse(La.textContent);St.base=`${new URL(St.base,xe())}`;function ye(){return St}function G(e){return St.features.includes(e)}function Ee(e,t){return typeof t!="undefined"?St.translations[e].replace("#",t.toString()):St.translations[e]}function Se(e,t=document){return P(`[data-md-component=${e}]`,t)}function ae(e,t=document){return $(`[data-md-component=${e}]`,t)}function _a(e){let t=P(".md-typeset > :first-child",e);return d(t,"click",{once:!0}).pipe(m(()=>P(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function gn(e){if(!G("announce.dismiss")||!e.childElementCount)return M;if(!e.hidden){let t=P(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),_a(e).pipe(E(r=>t.next(r)),L(()=>t.complete()),m(r=>R({ref:e},r)))})}function Aa(e,{target$:t}){return 
t.pipe(m(r=>({hidden:r!==e})))}function xn(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Aa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))}function Pt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function yn(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function En(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function wn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,c)," "],[]).slice(0,-1),i=ye(),a=new URL(e.location,i.base);G("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=ye();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)}),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Tn(e){let t=e[0].score,r=[...e],o=ye(),n=r.findIndex(l=>!`${new 
URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreqr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function Sn(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Qr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function On(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function Ca(e){var o;let t=ye(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function Mn(e,t){var o;let r=ye();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map(Ca)))}var Ha=0;function ka(e){let t=z([et(e),kt(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Jo(e)).pipe(oe(De),ct(1),m(()=>Xo(e)));return t.pipe(Ae(o=>o),b(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function $a(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Ha++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(X(),ne(!1)).subscribe(a);let s=a.pipe(Ht(c=>Me(+!c*250,Hr)),K(),b(c=>c?r:M),E(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(b(c=>kt(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let 
p=a.pipe(v(c=>c),ee(s,o),m(([c,l,{size:f}])=>{let u=e.getBoundingClientRect(),h=u.width/2;if(l.role==="tooltip")return{x:h,y:8+u.height};if(u.y>=f.height/2){let{height:w}=ce(l);return{x:h,y:-16-w}}else return{x:h,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(v(c=>c),ee(s,(c,l)=>l),v(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(P(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),be(me),ee(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(v(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(v(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),ka(e).pipe(E(c=>i.next(c)),L(()=>i.complete()),m(c=>R({ref:e},c)))})}function lt(e,{viewport$:t},r=document.body){return $a(e,{content$:new F(o=>{let n=e.title,i=yn(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Pa(e,t){let r=C(()=>z([Zo(e),De(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(b(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function Ln(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));return 
i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(U(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),S(i.pipe(v(({active:s})=>s)),i.pipe(_e(250),v(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(ct(125,me),v(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),d(n,"click").pipe(U(a),v(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),d(n,"mousedown").pipe(U(a),ee(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Re())==null||c.blur()}}),r.pipe(U(a),v(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Pa(e,t).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function Ra(e){return e.tagName==="CODE"?$(".c, .c1, .cm",e):[e]}function Ia(e){let t=[];for(let r of Ra(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function _n(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of Ia(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > 
li:nth-child(${p})`,e)&&(a.set(p,En(p,i)),s.replaceWith(a.get(p)))}return a.size===0?M:C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=[];for(let[l,f]of a)c.push([P(".md-typeset",f),P(`:scope > li:nth-child(${l})`,e)]);return o.pipe(U(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?_n(f,u):_n(u,f)}),S(...[...a].map(([,l])=>Ln(l,t,{target$:r}))).pipe(L(()=>s.complete()),pe())})}function An(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return An(t)}}function Cn(e,t){return C(()=>{let r=An(e);return typeof r!="undefined"?fr(r,e,t):M})}var Hn=Vt(Yr());var Fa=0;function kn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return kn(t)}}function ja(e){return ge(e).pipe(m(({width:t})=>({scrollable:Tt(e).width>t})),Z("scrollable"))}function $n(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(Fr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Hn.default.isSupported()&&(e.closest(".copy")||G("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Fa++}`;let l=wn(c.id);c.insertBefore(l,e),G("content.tooltips")&&a.push(lt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=kn(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||G("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(U(i),m(({width:f,height:u})=>f&&u),K(),b(f=>f?l:M)))}}return $(":scope > span[id]",e).length&&e.classList.add("md-code__content"),ja(e).pipe(E(c=>n.next(c)),L(()=>n.complete()),m(c=>R({ref:e},c)),Pe(...a))});return G("content.lazy")?tt(e).pipe(v(n=>n),Te(1),b(()=>o)):o}function Wa(e,{target$:t,print$:r}){let o=!0;return 
S(t.pipe(m(n=>n.closest("details:not([open])")),v(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(v(n=>n||!o),E(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Pn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Wa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}var Rn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node 
.divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs 
#statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead 
path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Br,Da=0;function Va(){return typeof mermaid=="undefined"||mermaid instanceof Element?wt("https://unpkg.com/mermaid@10/dist/mermaid.min.js"):I(void 0)}function In(e){return e.classList.remove("mermaid"),Br||(Br=Va().pipe(E(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Rn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),B(1))),Br.subscribe(()=>ao(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Da++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Br.pipe(m(()=>({ref:e})))}var Fn=x("table");function jn(e){return e.replaceWith(Fn),Fn.replaceWith(On(e)),I({ref:e})}function Na(e){let t=e.find(r=>r.checked)||e[0];return S(...e.map(r=>d(r,"change").pipe(m(()=>P(`label[for="${r.id}"]`))))).pipe(Q(P(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Wn(e,{viewport$:t,target$:r}){let o=P(".tabbed-labels",e),n=$(":scope > input",e),i=Qr("prev");e.append(i);let a=Qr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(X(),ne(!0));z([s,ge(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ue(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let 
h=pr(o);(f.xh.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([De(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=Tt(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),S(d(i,"click").pipe(m(()=>-1)),d(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),v(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=P(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(p),v(f=>!(f.metaKey||f.ctrlKey)),E(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return G("content.tabs.link")&&s.pipe(Ce(1),ee(t)).subscribe(([{active:c},{offset:l}])=>{let f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of $("[data-tabs]"))for(let A of $(":scope > input",w)){let te=P(`label[for="${A.id}"]`);if(te!==c&&te.innerText.trim()===f){te.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of $("audio, video",e))c.pause()}),tt(e).pipe(b(()=>Na(n)),E(c=>s.next(c)),L(()=>s.complete()),m(c=>R({ref:e},c)))}).pipe(Qe(se))}function Un(e,{viewport$:t,target$:r,print$:o}){return S(...$(".annotate:not(.highlight)",e).map(n=>Cn(n,{target$:r,print$:o})),...$("pre:not(.mermaid) > 
code",e).map(n=>$n(n,{target$:r,print$:o})),...$("pre.mermaid",e).map(n=>In(n)),...$("table:not([class])",e).map(n=>jn(n)),...$("details",e).map(n=>Pn(n,{target$:r,print$:o})),...$("[data-tabs]",e).map(n=>Wn(n,{viewport$:t,target$:r})),...$("[title]",e).filter(()=>G("content.tooltips")).map(n=>lt(n,{viewport$:t})))}function za(e,{alert$:t}){return t.pipe(b(r=>S(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function Dn(e,t){let r=P(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),za(e,t).pipe(E(n=>o.next(n)),L(()=>o.complete()),m(n=>R({ref:e},n)))})}var qa=0;function Qa(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?De(o):I({x:0,y:0}),i=S(et(t),kt(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ue(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function Vn(e){let t=e.title;if(!t.length)return M;let r=`__tooltip_${qa++}`,o=Pt(r,"inline"),n=P(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return 
i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),S(i.pipe(v(({active:a})=>a)),i.pipe(_e(250),v(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(ct(125,me),v(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Qa(o,e).pipe(E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))}).pipe(Qe(se))}function Ka({viewport$:e}){if(!G("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Ye(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=Ve("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),b(n=>n?r:I(!1)),Q(!1))}function Nn(e,t){return C(()=>z([ge(e),Ka(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),B(1))}function zn(e,{header$:t,main$:r}){return C(()=>{let o=new g,n=o.pipe(X(),ne(!0));o.pipe(Z("active"),We(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue($("[title]",e)).pipe(v(()=>G("content.tooltips")),oe(a=>Vn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>R({ref:e},a)),Pe(i.pipe(U(n))))})}function Ya(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),Z("active"))}function qn(e,t){return C(()=>{let r=new 
g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?M:Ya(o,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))})}function Qn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(b(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),Z("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Ba(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(oe(o=>d(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),B(1))}function Kn(e){let t=$("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=$t("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let 
s=0;sa.key==="Enter"),ee(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(be(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Ba(t).pipe(U(n.pipe(Ce(1))),st(),E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))})}function Yn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(E(o=>r.next({value:o})),L(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Gr=Vt(Yr());function Ga(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Bn({alert$:e}){Gr.default.isSupported()&&new F(t=>{new Gr.default("[data-clipboard-target], [data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Ga(P(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(E(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function Gn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function Ja(e,t){let r=new Map;for(let o of $("url",e)){let n=P("loc",o),i=[Gn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of $("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Gn(new URL(s),t))}}return r}function ur(e){return mn(new URL("sitemap.xml",e)).pipe(m(t=>Ja(t,new URL(e))),ve(()=>I(new Map)))}function Xa(e,t){if(!(e.target instanceof Element))return M;let r=e.target.closest("a");if(r===null)return M;if(r.target||e.metaKey||e.ctrlKey)return M;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):M}function Jn(e){let t=new Map;for(let r of $(":scope > *",e.head))t.set(r.outerHTML,r);return 
t}function Xn(e){for(let t of $("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function Za(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...G("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=Jn(document);for(let[o,n]of Jn(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return je($("script",r)).pipe(b(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),M}),X(),ne(document))}function Zn({location$:e,viewport$:t,progress$:r}){let o=ye();if(location.protocol==="file:")return M;let n=ur(o.base);I(document).subscribe(Xn);let i=d(document.body,"click").pipe(We(n),b(([p,c])=>Xa(p,c)),pe()),a=d(window,"popstate").pipe(m(xe),pe());i.pipe(ee(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),S(i,a).subscribe(e);let s=e.pipe(Z("pathname"),b(p=>ln(p,{progress$:r}).pipe(ve(()=>(pt(p,!0),M)))),b(Xn),b(Za),pe());return S(s.pipe(ee(e,(p,c)=>c)),s.pipe(b(()=>e),Z("pathname"),b(()=>e),Z("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),b(()=>i),E(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 
0:c.y)!=null?l:0):(history.scrollRestoration="auto",sn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(Z("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ri=Vt(ti());function oi(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ri.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function It(e){return e.type===1}function dr(e){return e.type===3}function ni(e,t){let r=vn(e);return S(I(location.protocol!=="file:"),Ve("search")).pipe(Ae(o=>o),b(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:G("search.suggest")}}})),r}function ii({document$:e}){let t=ye(),r=Ne(new URL("../versions.json",t.base)).pipe(ve(()=>M)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),b(n=>d(document.body,"click").pipe(v(i=>!i.metaKey&&!i.ctrlKey),ee(o),b(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?M:(i.preventDefault(),I(p))}}return M}),b(i=>ur(new URL(i)).pipe(m(a=>{let p=xe().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>pt(n,!0)),z([r,o]).subscribe(([n,i])=>{P(".md-header__topic").appendChild(Mn(n,i))}),e.pipe(b(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p 
of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ns(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),Ve("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=S(t.pipe(Ae(It)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),B(1))}function ai(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(It)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=P("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),ns(e,{worker$:t}).pipe(E(i=>r.next(i)),L(()=>r.complete()),m(i=>R({ref:e},i)),B(1))}function si(e,{worker$:t,query$:r}){let o=new g,n=tn(e.parentElement).pipe(v(Boolean)),i=e.parentElement,a=P(":scope > :first-child",e),s=P(":scope > :last-child",e);Ve("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(It)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(E(()=>s.innerHTML=""),b(({items:l})=>S(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Ye(4),Vr(n),b(([f])=>f)))),m(Tn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?M:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(v(dr),m(({data:l})=>l)).pipe(E(l=>o.next(l)),L(()=>o.complete()),m(l=>R({ref:e},l)))}function 
is(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function ci(e,t){let r=new g,o=r.pipe(X(),ne(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),is(e,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))}function pi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=S(d(n,"keydown"),d(n,"focus")).pipe(be(se),m(()=>n.value),K());return o.pipe(We(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(v(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(v(dr),m(({data:s})=>s)).pipe(E(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function li(e,{index$:t,keyboard$:r}){let o=ye();try{let n=ni(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(v(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(v(({mode:p})=>p==="search")).subscribe(p=>{let c=Re();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of $(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...$(":not(details) > [href], summary, details[open] 
[href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(v(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=ai(i,{worker$:n});return S(s,si(a,{worker$:n,query$:s})).pipe(Pe(...ae("search-share",e).map(p=>ci(p,{query$:s})),...ae("search-suggest",e).map(p=>pi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ke}}function mi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),v(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>oi(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function as(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Jr(e,o){var n=o,{header$:t}=n,r=io(n,["header$"]);let i=P(".md-sidebar__scrollwrap",e),{y:a}=Ue(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of $(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue($("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(be(se),m(()=>l),U(p)))).subscribe(l=>{let 
f=P(`[id="${l.htmlFor}"]`);P(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),as(e,r).pipe(E(l=>s.next(l)),L(()=>s.complete()),m(l=>R({ref:e},l)))})}function fi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Ct(Ne(`${r}/releases/latest`).pipe(ve(()=>M),m(o=>({version:o.tag_name})),Be({})),Ne(r).pipe(ve(()=>M),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Be({}))).pipe(m(([o,n])=>R(R({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ne(r).pipe(m(o=>({repositories:o.public_repos})),Be({}))}}function ui(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Ne(r).pipe(ve(()=>M),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Be({}))}function di(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return fi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ui(r,o)}return M}var ss;function cs(e){return ss||(ss=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return M}return di(e.href).pipe(E(o=>__md_set("__source",o,sessionStorage)))}).pipe(ve(()=>M),v(t=>Object.keys(t).length>0),m(t=>({facts:t})),B(1)))}function hi(e){let t=P(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Sn(o)),t.classList.add("md-source__repository--active")}),cs(e).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ps(e,{viewport$:t,header$:r}){return ge(document.body).pipe(b(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function bi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(G("navigation.tabs.sticky")?I({hidden:!1}):ps(e,t)).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){let o=new Map,n=$(".md-nav__link",e);for(let s of 
n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=P(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),b(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),We(i),b(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Ye(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),G("toc.follow")){let s=S(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(v(({prev:p})=>p.length>0),We(o.pipe(be(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return G("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),st({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else 
p.hash="",history.replaceState({},"",`${p}`)}),ls(e,{viewport$:t,header$:r}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function ms(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Ye(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),st({delay:250}),m(a=>({hidden:a})))}function gi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),ms(e,{viewport$:t,main$:o,target$:n}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))}function xi({document$:e,viewport$:t}){e.pipe(b(()=>$(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),v(o=>o),m(()=>r),Te(1))),v(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,lt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title")))})).subscribe(),e.pipe(b(()=>$(".md-status")),oe(r=>lt(r,{viewport$:t}))).subscribe()}function yi({document$:e,tablet$:t}){e.pipe(b(()=>$(".md-toggle--indeterminate")),E(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function fs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ei({document$:e}){e.pipe(b(()=>$("[data-md-scrollfix]")),E(t=>t.removeAttribute("data-md-scrollfix")),v(fs),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function 
wi({viewport$:e,tablet$:t}){z([Ve("search"),t]).pipe(m(([r,o])=>r&&!o),b(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function us(){return location.protocol==="file:"?wt(`${new URL("search/search_index.js",Xr.base)}`).pipe(m(()=>__index),B(1)):Ne(new URL("search/search_index.json",Xr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Yo(),jt=nn(),Ot=cn(jt),Zr=on(),Oe=bn(),hr=$t("(min-width: 960px)"),Si=$t("(min-width: 1220px)"),Oi=pn(),Xr=ye(),Mi=document.forms.namedItem("search")?us():Ke,eo=new g;Bn({alert$:eo});var to=new g;G("navigation.instant")&&Zn({location$:jt,viewport$:Oe,progress$:to}).subscribe(ot);var Ti;((Ti=Xr.version)==null?void 0:Ti.provider)==="mike"&&ii({document$:ot});S(jt,Ot).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});Zr.pipe(v(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof 
t!="undefined"&&pt(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&&pt(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});xi({viewport$:Oe,document$:ot});yi({document$:ot,tablet$:hr});Ei({document$:ot});wi({viewport$:Oe,tablet$:hr});var rt=Nn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),b(e=>Qn(e,{viewport$:Oe,header$:rt})),B(1)),ds=S(...ae("consent").map(e=>xn(e,{target$:Ot})),...ae("dialog").map(e=>Dn(e,{alert$:eo})),...ae("header").map(e=>zn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Kn(e)),...ae("progress").map(e=>Yn(e,{progress$:to})),...ae("search").map(e=>li(e,{index$:Mi,keyboard$:Zr})),...ae("source").map(e=>hi(e))),hs=C(()=>S(...ae("announce").map(e=>gn(e)),...ae("content").map(e=>Un(e,{viewport$:Oe,target$:Ot,print$:Oi})),...ae("content").map(e=>G("search.highlight")?mi(e,{index$:Mi,location$:jt}):M),...ae("header-title").map(e=>qn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Si,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>bi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>vi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})),...ae("top").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})))),Li=ot.pipe(b(()=>hs),Pe(ds),B(1));Li.subscribe();window.document$=ot;window.location$=jt;window.target$=Ot;window.keyboard$=Zr;window.viewport$=Oe;window.tablet$=hr;window.screen$=Si;window.print$=Oi;window.alert$=eo;window.progress$=to;window.component$=Li;})(); +//# sourceMappingURL=bundle.081f42fc.min.js.map + diff --git a/assets/javascripts/bundle.081f42fc.min.js.map b/assets/javascripts/bundle.081f42fc.min.js.map new file mode 100644 index 0000000000..e055db5acc --- /dev/null +++ b/assets/javascripts/bundle.081f42fc.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", 
"node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", 
"node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", 
"node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", 
"node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", 
"src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", 
"src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", 
"src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * 
`focus-visible` class being added, i.e. whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. 
Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n 
document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. 
For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. 
So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = 
factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box 
model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. 
https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. 
Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
\"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if 
(self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} 
useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || 
exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener 
to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName === 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from 
https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) 
{\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = 
index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n 
mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? 
fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n 
/* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? 
at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? 
value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? 
y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? 
(this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? 
f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. 
Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. 
A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. 
Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? []).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? 
[_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. 
It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. 
This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. 
Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { 
timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. 
Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { 
reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. 
Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | 
null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". 
Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): 
UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. 
This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. 
See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). 
In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. 
Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * 
}\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. 
To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: 
OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? 
Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. 
Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. 
This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. (DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? 
function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. 
Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected 
_checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. 
When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. `ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. 
This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 
1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. 
May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is 
not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? 
this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... 
*/\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. 
Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the 
Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? 
super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. 
Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. 
cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? 
of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! 
: defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an + +skipper + + +cluster_L + + + + +File: skipper +
+ +File: skipper +Type: cpu +Time: Oct 7, 2020 at 6:22pm (CEST) +Duration: 30.11s, Total samples = 4.28s (14.22%) +Showing nodes accounting for 2.44s, 57.01% of 4.28s total +Dropped 330 nodes (cum <= 0.02s) +Showing top 80 nodes out of 303 + + + + + +N1 + + +http +(*conn) +serve +0 of 2.32s (54.21%) + + + + + +N7 + + +http +serverHandler +ServeHTTP +0 of 2.11s (49.30%) + + + + + +N1->N7 + + + + + + + 2.11s + + + + + +N2 + + +proxy +(*Proxy) +ServeHTTP +0 of 1.83s (42.76%) + + + + + +N4 + + +proxy +(*Proxy) +do +0 of 1.13s (26.40%) + + + + + +N2->N4 + + + + + + + 1.13s + + + + + +N12 + + +time +Now +0 of 0.34s (7.94%) + + + + + +N2->N12 + + + + + + + 0.04s + + + + + +N15 + + +proxy +(*Proxy) +serveResponse +0 of 0.51s (11.92%) + + + + + +N2->N15 + + + + + + + 0.51s + + + + + +N23 + + +runtime +newobject +0.01s (0.23%) +of 0.17s (3.97%) + + + + + +N2->N23 + + + + + + + 0.01s + + + + + +N38 + + +runtime +makeslice +0.01s (0.23%) +of 0.12s (2.80%) + + + + + +N2->N38 + + + + + + + 0.01s + + + + + +N69 + + +runtime +convTstring +0.01s (0.23%) +of 0.05s (1.17%) + + + + + +N2->N69 + + + + + + + 0.01s + + + + + +N3 + + +runtime +systemstack +0 of 0.60s (14.02%) + + + + + +N32 + + +runtime +scanobject +0.16s (3.74%) +of 0.22s (5.14%) + + + + + +N3->N32 + + + + + + + 0.22s + + + + + +N76 + + +runtime +startm +0 of 0.20s (4.67%) + + + + + +N3->N76 + + + + + + + 0.17s + + + + + +N10 + + +proxy +(*Proxy) +makeBackendRequest +0.01s (0.23%) +of 0.39s (9.11%) + + + + + +N4->N10 + + + + + + + 0.39s + + + + + +N4->N12 + + + + + + + 0.02s + + + + + +N24 + + +proxy +(*Proxy) +applyFiltersToResponse +0 of 0.36s (8.41%) + + + + + +N4->N24 + + + + + + + 0.36s + + + + + +N27 + + +proxy +(*Proxy) +applyFiltersToRequest +0 of 0.28s (6.54%) + + + + + +N4->N27 + + + + + + + 0.28s + + + + + +N5 + + +runtime +mallocgc +0.17s (3.97%) +of 0.45s (10.51%) + + + + + +N5->N3 + + + + + + + 0.07s + + + + + +N70 + + +runtime +heapBitsSetType +0.08s (1.87%) + + + + + +N5->N70 + + + + + + + 0.08s + + + + + 
+N6 + + +runtime +nanotime +0.42s (9.81%) +of 0.43s (10.05%) + + + + + +N7->N2 + + + + + + + 1.83s + + + + + +N68 + + +promhttp +HandlerFor +func1 +0 of 0.28s (6.54%) + + + + + +N7->N68 + + + + + + + 0.28s + + + + + +N8 + + +runtime +futex +0.41s (9.58%) + + + + + +N9 + + +bufio +(*Writer) +Flush +0 of 0.36s (8.41%) + + + + + +N21 + + +net +(*conn) +Write +0 of 0.42s (9.81%) + + + + + +N9->N21 + + + + + + + 0.12s + + + + + +N74 + + +http +checkConnErrorWriter +Write +0 of 0.27s (6.31%) + + + + + +N9->N74 + + + + + + + 0.24s + + + + + +N75 + + +bufio +(*Writer) +Write +0 of 0.06s (1.40%) + + + + + +N9->N75 + + + + + + + 0.01s + + + + + +N10->N12 + + + + + + + 0.01s + + + + + +N43 + + +http +(*persistConn) +roundTrip +0.01s (0.23%) +of 0.11s (2.57%) + + + + + +N10->N43 + + + + + + + 0.10s + + + + + +N45 + + +eskip +(*Route) +Print +0 of 0.14s (3.27%) + + + + + +N10->N45 + + + + + + + 0.10s + + + + + +N57 + + +runtime +mapassign_faststr +0.01s (0.23%) +of 0.07s (1.64%) + + + + + +N10->N57 + + + + + + + 0.04s + + + + + +N71 + + +lightstep-tracer-go +(*spanImpl) +LogKV +0 of 0.24s (5.61%) + + + + + +N10->N71 + + + + + + + 0.01s + + + + + +N11 + + +syscall +Syscall +0.56s (13.08%) +of 0.64s (14.95%) + + + + + +N11->N3 + + + + + + + 0.07s + + + + + +N29 + + +time +now +0.01s (0.23%) +of 0.34s (7.94%) + + + + + +N12->N29 + + + + + + + 0.34s + + + + + +N13 + + +runtime +findrunnable +0.04s (0.93%) +of 0.39s (9.11%) + + + + + +N50 + + +runtime +checkTimers +0 of 0.18s (4.21%) + + + + + +N13->N50 + + + + + + + 0.09s + + + + + +N73 + + +runtime +epollwait +0.07s (1.64%) + + + + + +N13->N73 + + + + + + + 0.07s + + + + + +N14 + + +proxy +copyStream +0.01s (0.23%) +of 0.32s (7.48%) + + + + + +N36 + + +proxy +(*proxyTracing) +logEvent +0 of 0.25s (5.84%) + + + + + +N14->N36 + + + + + + + 0.04s + (inline) + + + + + +N14->N38 + + + + + + + 0.05s + + + + + +N42 + + +logging +(*LoggingWriter) +Flush +0 of 0.25s (5.84%) + + + + + +N14->N42 + + + + + + + 0.10s + + + + + +N59 + + +http 
+(*body) +Read +0 of 0.16s (3.74%) + + + + + +N14->N59 + + + + + + + 0.07s + + + + + +N15->N12 + + + + + + + 0.01s + + + + + +N15->N14 + + + + + + + 0.32s + + + + + +N15->N42 + + + + + + + 0.15s + + + + + +N15->N57 + + + + + + + 0.02s + + + + + +N16 + + +fmt +Sprintf +0 of 0.18s (4.21%) + + + + + +N55 + + +runtime +memmove +0.05s (1.17%) + + + + + +N16->N55 + + + + + + + 0.01s + + + + + +N58 + + +runtime +getitab +0.03s (0.7%) +of 0.06s (1.40%) + + + + + +N16->N58 + + + + + + + 0.01s + + + + + +N65 + + +strconv +genericFtoa +0.02s (0.47%) +of 0.08s (1.87%) + + + + + +N16->N65 + + + + + + + 0.03s + + + + + +N67 + + +runtime +slicebytetostring +0.01s (0.23%) +of 0.05s (1.17%) + + + + + +N16->N67 + + + + + + + 0.02s + + + + + +N17 + + +http +(*persistConn) +writeLoop +0 of 0.38s (8.88%) + + + + + +N17->N9 + + + + + + + 0.05s + + + + + +N25 + + +http +(*Request) +write +0 of 0.31s (7.24%) + + + + + +N17->N25 + + + + + + + 0.31s + + + + + +N18 + + +poll +ignoringEINTR +0 of 0.62s (14.49%) + + + + + +N18->N11 + + + + + + + 0.62s + + + + + +N19 + + +proxy +tryCatch +0 of 0.34s (7.94%) + + + + + +N34 + + +proxy +(*Proxy) +applyFiltersToResponse +func1 +0 of 0.23s (5.37%) + + + + + +N19->N34 + + + + + + + 0.23s + + + + + +N51 + + +proxy +(*Proxy) +applyFiltersToRequest +func1 +0 of 0.11s (2.57%) + + + + + +N19->N51 + + + + + + + 0.11s + + + + + +N20 + + +runtime +mcall +0.01s (0.23%) +of 0.51s (11.92%) + + + + + +N35 + + +runtime +schedule +0 of 0.49s (11.45%) + + + + + +N20->N35 + + + + + + + 0.49s + + + + + +N21->N18 + + + + + + + 0.40s + + + + + +N22 + + +http +(*persistConn) +readLoop +0 of 0.17s (3.97%) + + + + + +N22->N12 + + + + + + + 0.01s + + + + + +N22->N23 + + + + + + + 0.01s + + + + + +N30 + + +runtime +selectgo +0.03s (0.7%) +of 0.13s (3.04%) + + + + + +N22->N30 + + + + + + + 0.02s + + + + + +N31 + + +bufio +(*Reader) +fill +0 of 0.14s (3.27%) + + + + + +N22->N31 + + + + + + + 0.08s + + + + + +N22->N67 + + + + + + + 0.01s + + + + + +N23->N5 + + + + + + + 0.16s 
+ + + + + +N24->N12 + + + + + + + 0.05s + + + + + +N24->N19 + + + + + + + 0.23s + + + + + +N66 + + +proxy +(*proxyTracing) +logFilterEvent +0 of 0.20s (4.67%) + + + + + +N24->N66 + + + + + + + 0.06s + (inline) + + + + + +N25->N9 + + + + + + + 0.07s + + + + + +N39 + + +io +Copy +0 of 0.21s (4.91%) + + + + + +N25->N39 + + + + + + + 0.12s + (inline) + + + + + +N25->N58 + + + + + + + 0.01s + + + + + +N25->N69 + + + + + + + 0.01s + + + + + +N26 + + +io +copyBuffer +0 of 0.21s (4.91%) + + + + + +N26->N21 + + + + + + + 0.03s + + + + + +N26->N38 + + + + + + + 0.01s + + + + + +N26->N39 + + + + + + + 0.12s + (inline) + + + + + +N56 + + +bytes +(*Buffer) +ReadFrom +0 of 0.09s (2.10%) + + + + + +N26->N56 + + + + + + + 0.09s + + + + + +N72 + + +io +(*LimitedReader) +Read +0 of 0.08s (1.87%) + + + + + +N26->N72 + + + + + + + 0.08s + + + + + +N27->N12 + + + + + + + 0.02s + + + + + +N27->N19 + + + + + + + 0.11s + + + + + +N27->N66 + + + + + + + 0.14s + (inline) + + + + + +N28 + + +net +(*conn) +Read +0 of 0.23s (5.37%) + + + + + +N28->N18 + + + + + + + 0.21s + + + + + +N29->N6 + + + + + + + 0.15s + + + + + +N33 + + +runtime +walltime +0.17s (3.97%) +of 0.18s (4.21%) + + + + + +N29->N33 + + + + + + + 0.18s + (inline) + + + + + +N30->N3 + + + + + + + 0.09s + + + + + +N31->N59 + + + + + + + 0.01s + + + + + +N64 + + +http +(*persistConn) +Read +0 of 0.14s (3.27%) + + + + + +N31->N64 + + + + + + + 0.09s + + + + + +N34->N30 + + + + + + + 0.05s + + + + + +N47 + + +apiusagemonitoring +(*apiUsageMonitoringFilter) +Response +0 of 0.12s (2.80%) + + + + + +N34->N47 + + + + + + + 0.12s + + + + + +N35->N13 + + + + + + + 0.39s + + + + + +N35->N50 + + + + + + + 0.09s + + + + + +N36->N23 + + + + + + + 0.01s + + + + + +N36->N69 + + + + + + + 0.01s + + + + + +N36->N71 + + + + + + + 0.23s + + + + + +N37 + + +kubernetes +(*Client) +LoadUpdate +0 of 0.31s (7.24%) + + + + + +N37->N45 + + + + + + + 0.04s + + + + + +N60 + + +regexp +(*Regexp) +doExecute +0.02s (0.47%) +of 0.07s (1.64%) + + + + + +N37->N60 
+ + + + + + + 0.02s + + + + + +N78 + + +kubernetes +(*clusterClient) +fetchClusterState +0 of 0.24s (5.61%) + + + + + +N37->N78 + + + + + + + 0.24s + + + + + +N38->N5 + + + + + + + 0.11s + + + + + +N39->N26 + + + + + + + 0.21s + + + + + +N40 + + +runtime +notewakeup +0.01s (0.23%) +of 0.28s (6.54%) + + + + + +N40->N8 + + + + + + + 0.27s + + + + + +N41 + + +routing +receiveFromClient +0 of 0.31s (7.24%) + + + + + +N41->N37 + + + + + + + 0.31s + + + + + +N42->N58 + + + + + + + 0.01s + + + + + +N80 + + +http +(*chunkWriter) +flush +0 of 0.24s (5.61%) + + + + + +N42->N80 + + + + + + + 0.24s + + + + + +N43->N6 + + + + + + + 0.01s + + + + + +N43->N30 + + + + + + + 0.02s + + + + + +N62 + + +runtime +mapaccess1_faststr +0.03s (0.7%) +of 0.05s (1.17%) + + + + + +N43->N62 + + + + + + + 0.01s + + + + + +N44 + + +http +(*body) +readLocked +0 of 0.16s (3.74%) + + + + + +N49 + + +bufio +(*Reader) +Read +0.01s (0.23%) +of 0.10s (2.34%) + + + + + +N44->N49 + + + + + + + 0.06s + + + + + +N44->N72 + + + + + + + 0.04s + + + + + +N45->N69 + + + + + + + 0.01s + + + + + +N79 + + +eskip +argsString +0 of 0.10s (2.34%) + + + + + +N45->N79 + + + + + + + 0.10s + + + + + +N46 + + +expfmt +MetricFamilyToText +0 of 0.24s (5.61%) + + + + + +N46->N55 + + + + + + + 0.01s + + + + + +N63 + + +flate +(*compressor) +deflate +0.05s (1.17%) +of 0.12s (2.80%) + + + + + +N46->N63 + + + + + + + 0.12s + + + + + +N46->N65 + + + + + + + 0.05s + + + + + +N47->N6 + + + + + + + 0.02s + + + + + +N47->N23 + + + + + + + 0.01s + + + + + +N54 + + +json +Unmarshal +0 of 0.18s (4.21%) + + + + + +N47->N54 + + + + + + + 0.04s + + + + + +N47->N60 + + + + + + + 0.02s + + + + + +N47->N69 + + + + + + + 0.01s + + + + + +N48 + + +runtime +gcBgMarkWorker +0 of 0.25s (5.84%) + + + + + +N48->N3 + + + + + + + 0.25s + + + + + +N49->N64 + + + + + + + 0.05s + + + + + +N50->N6 + + + + + + + 0.17s + (inline) + + + + + +N50->N76 + + + + + + + 0.01s + + + + + +N51->N23 + + + + + + + 0.01s + + + + + +N51->N30 + + + + + + + 0.03s + + + + 
+ +N51->N43 + + + + + + + 0.01s + + + + + +N52 + + +lightstep-tracer-go +(*tracerImpl) +reportLoop +0 of 0.15s (3.50%) + + + + + +N52->N23 + + + + + + + 0.07s + + + + + +N52->N38 + + + + + + + 0.01s + + + + + +N53 + + +kubernetes +(*clusterClient) +getJSON +0 of 0.23s (5.37%) + + + + + +N53->N39 + + + + + + + 0.09s + (inline) + + + + + +N53->N54 + + + + + + + 0.14s + + + + + +N77 + + +json +(*decodeState) +value +0 of 0.10s (2.34%) + + + + + +N54->N77 + + + + + + + 0.10s + + + + + +N56->N31 + + + + + + + 0.01s + + + + + +N56->N38 + + + + + + + 0.02s + + + + + +N56->N55 + + + + + + + 0.01s + + + + + +N57->N23 + + + + + + + 0.02s + (inline) + + + + + +N59->N44 + + + + + + + 0.16s + + + + + +N61 + + +lightstep-tracer-go +(*spanImpl) +LogFields +0.01s (0.23%) +of 0.22s (5.14%) + + + + + +N61->N5 + + + + + + + 0.03s + + + + + +N61->N12 + + + + + + + 0.17s + + + + + +N63->N55 + + + + + + + 0.01s + + + + + +N63->N75 + + + + + + + 0.01s + + + + + +N64->N28 + + + + + + + 0.13s + + + + + +N64->N56 + + + + + + + 0.01s + + + + + +N66->N36 + + + + + + + 0.20s + (inline) + + + + + +N67->N5 + + + + + + + 0.03s + + + + + +N67->N55 + + + + + + + 0.01s + + + + + +N68->N23 + + + + + + + 0.02s + + + + + +N68->N46 + + + + + + + 0.24s + + + + + +N69->N5 + + + + + + + 0.04s + + + + + +N71->N38 + + + + + + + 0.02s + + + + + +N71->N61 + + + + + + + 0.22s + + + + + +N72->N49 + + + + + + + 0.04s + + + + + +N72->N59 + + + + + + + 0.08s + + + + + +N74->N21 + + + + + + + 0.27s + + + + + +N75->N9 + + + + + + + 0.03s + + + + + +N75->N74 + + + + + + + 0.03s + + + + + +N76->N40 + + + + + + + 0.20s + + + + + +N77->N67 + + + + + + + 0.01s + + + + + +N78->N53 + + + + + + + 0.23s + + + + + +N79->N16 + + + + + + + 0.09s + + + + + +N80->N9 + + + + + + + 0.20s + + + + + \ No newline at end of file diff --git a/img/svc-to-svc-tokeninfo.svg b/img/svc-to-svc-tokeninfo.svg new file mode 100644 index 0000000000..baaefa4024 --- /dev/null +++ b/img/svc-to-svc-tokeninfo.svg @@ -0,0 +1 @@ + \ No newline at end of 
file diff --git a/img/svc-to-svc-tokenintrospection.svg b/img/svc-to-svc-tokenintrospection.svg new file mode 100644 index 0000000000..b0b853bd20 --- /dev/null +++ b/img/svc-to-svc-tokenintrospection.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/img/swarm-and-cluster-ratelimit.svg b/img/swarm-and-cluster-ratelimit.svg new file mode 100644 index 0000000000..1d30e42e1b --- /dev/null +++ b/img/swarm-and-cluster-ratelimit.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/index.html b/index.html new file mode 100644 index 0000000000..b8e428d8a7 --- /dev/null +++ b/index.html @@ -0,0 +1,1295 @@ + + + + + + + + + + + + + + + + + + + + + + + Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Introduction

+

This is the documentation page of +Skipper. Skipper is an HTTP +router and reverse proxy for service composition. It’s designed to +handle large amounts of dynamically configured HTTP route definitions +(>800000 routes) with detailed lookup conditions, and flexible +augmentation of the request flow with filters. It can be used out of +the box or extended with custom lookup, filter logic and configuration +sources.

+

HTTP Proxy

+

Skipper identifies routes based on the requests’ properties, such as path, method, host and headers using the +predicates. It allows the modification of the requests and responses with +filters that are independently configured for +each route. Learn here more about how it works.

+

Kubernetes Ingress

+

Skipper can be used to run as a Kubernetes Ingress controller. Details with examples of Skipper’s capabilities +and an overview can be found in the ingress-controller deployment +docs.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/kubernetes/deploy/daemonset/daemonset.yaml b/kubernetes/deploy/daemonset/daemonset.yaml new file mode 100644 index 0000000000..5646b0942e --- /dev/null +++ b/kubernetes/deploy/daemonset/daemonset.yaml @@ -0,0 +1,72 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: skipper-ingress + namespace: kube-system + labels: + application: skipper-ingress + version: v0.10.180 + component: ingress +spec: + selector: + matchLabels: + application: skipper-ingress + updateStrategy: + type: RollingUpdate + template: + metadata: + name: skipper-ingress + labels: + application: skipper-ingress + version: v0.11.1 + component: ingress + spec: + priorityClassName: system-node-critical + serviceAccountName: skipper-ingress + tolerations: + - key: dedicated + operator: Exists + nodeSelector: + kubernetes.io/role: worker + hostNetwork: true + containers: + - name: skipper-ingress + image: registry.opensource.zalan.do/teapot/skipper:v0.12.0 + ports: + - name: ingress-port + containerPort: 9999 + hostPort: 9999 + - name: metrics-port + containerPort: 9911 + args: + - "skipper" + - "-kubernetes" + - "-kubernetes-in-cluster" + - "-kubernetes-path-mode=path-prefix" + - "-address=:9999" + - "-wait-first-route-load" + - "-proxy-preserve-host" + - "-serve-host-metrics" + - "-enable-ratelimits" + - "-experimental-upgrade" + - "-metrics-exp-decay-sample" + - "-reverse-source-predicate" + - "-lb-healthcheck-interval=3s" + - "-metrics-flavour=codahale,prometheus" + - "-enable-connection-metrics" + - "-max-audit-body=0" + - "-histogram-metric-buckets=.01,.025,.05,.075,.1,.2,.3,.4,.5,.75,1,2,3,4,5,7,10,15,20,30,60,120,300,600" + resources: + requests: + cpu: 150m + memory: 150Mi + readinessProbe: + httpGet: + path: /kube-system/healthz + port: 9999 + initialDelaySeconds: 5 + timeoutSeconds: 5 + securityContext: + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 diff --git 
a/kubernetes/deploy/daemonset/rbac.yaml b/kubernetes/deploy/daemonset/rbac.yaml new file mode 100644 index 0000000000..7f651db112 --- /dev/null +++ b/kubernetes/deploy/daemonset/rbac.yaml @@ -0,0 +1,108 @@ +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: hostnetwork +spec: + hostNetwork: true + hostPorts: + - max: 10000 + min: 50 + supplementalGroups: + rule: RunAsAny + fsGroup: + rule: RunAsAny + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + seLinux: + rule: RunAsAny +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: hostnetwork-psp +rules: +- apiGroups: + - extensions + resourceNames: + - hostnetwork + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: skipper-ingress + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: skipper-ingress +rules: +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list +- apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list +- apiGroups: [""] + resources: + - namespaces + - services + - endpoints + - pods + verbs: + - get + - list +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list +- apiGroups: + - zalando.org + resources: + - routegroups + verbs: + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: skipper-ingress +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: skipper-ingress +subjects: +- kind: ServiceAccount + name: skipper-ingress + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: skipper-ingress-hostnetwork-psp + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hostnetwork-psp +subjects: +- kind: ServiceAccount + 
name: skipper-ingress + namespace: kube-system diff --git a/kubernetes/deploy/demo/deployment.yaml b/kubernetes/deploy/demo/deployment.yaml new file mode 100644 index 0000000000..f7174a3df9 --- /dev/null +++ b/kubernetes/deploy/demo/deployment.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: skipper-demo +spec: + replicas: 2 + selector: + matchLabels: + application: skipper-demo + template: + metadata: + labels: + application: skipper-demo + spec: + containers: + - name: skipper-demo + image: registry.opensource.zalan.do/teapot/skipper:v0.12.0 + args: + - "skipper" + - "-inline-routes" + - "* -> inlineContent(\"

Hello!

\") -> " + ports: + - containerPort: 9090 diff --git a/kubernetes/deploy/demo/svc.yaml b/kubernetes/deploy/demo/svc.yaml new file mode 100644 index 0000000000..55554369c6 --- /dev/null +++ b/kubernetes/deploy/demo/svc.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: skipper-demo + labels: + application: skipper-demo +spec: + type: ClusterIP + ports: + - port: 80 + protocol: TCP + targetPort: 9090 + name: external + selector: + application: skipper-demo diff --git a/kubernetes/deploy/deployment/deployment.yaml b/kubernetes/deploy/deployment/deployment.yaml new file mode 100644 index 0000000000..f15db53466 --- /dev/null +++ b/kubernetes/deploy/deployment/deployment.yaml @@ -0,0 +1,94 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: skipper-ingress + namespace: kube-system + labels: + application: skipper-ingress + version: v0.11.40 + component: ingress +spec: + strategy: + rollingUpdate: + maxSurge: 0 + selector: + matchLabels: + application: skipper-ingress + template: + metadata: + labels: + application: skipper-ingress + version: v0.11.40 + component: ingress + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: application + operator: In + values: + - skipper-ingress + topologyKey: kubernetes.io/hostname + priorityClassName: system-cluster-critical + serviceAccountName: skipper-ingress + nodeSelector: + kubernetes.io/role: worker + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + containers: + - name: skipper-ingress + image: registry.opensource.zalan.do/teapot/skipper:v0.12.0 + ports: + - name: ingress-port + containerPort: 9999 + hostPort: 9999 + args: + - "skipper" + - "-kubernetes" + - "-kubernetes-in-cluster" + - "-kubernetes-path-mode=path-prefix" + - "-address=:9999" + - "-wait-first-route-load" + - "-proxy-preserve-host" + - "-serve-host-metrics" + - "-disable-metrics-compat" + - "-enable-profile" + - "-enable-ratelimits" + - 
"-experimental-upgrade" + - "-metrics-exp-decay-sample" + - "-reverse-source-predicate" + - "-lb-healthcheck-interval=3s" + - "-metrics-flavour=prometheus" + - "-enable-connection-metrics" + - "-max-audit-body=0" + - "-histogram-metric-buckets=.0001,.00025,.0005,.00075,.001,.0025,.005,.0075,.01,.025,.05,.075,.1,.2,.3,.4,.5,.75,1,2,3,4,5,7,10,15,20,30,60,120,300,600" + - "-expect-continue-timeout-backend=30s" + - "-keepalive-backend=30s" + - "-max-idle-connection-backend=0" + - "-response-header-timeout-backend=1m" + - "-timeout-backend=1m" + - "-tls-timeout-backend=1m" + - "-close-idle-conns-period=20s" + - "-idle-timeout-server=62s" + - "-read-timeout-server=5m" + - "-write-timeout-server=60s" + - '-default-filters-prepend=enableAccessLog(4,5) -> lifo(2000,20000,"3s")' + resources: + limits: + cpu: "4" + memory: "1Gi" + requests: + cpu: "4" + memory: "1Gi" + readinessProbe: + httpGet: + path: /kube-system/healthz + port: 9999 + initialDelaySeconds: 60 + timeoutSeconds: 5 + securityContext: + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 diff --git a/kubernetes/deploy/deployment/hpa.yaml b/kubernetes/deploy/deployment/hpa.yaml new file mode 100644 index 0000000000..ccb2431a46 --- /dev/null +++ b/kubernetes/deploy/deployment/hpa.yaml @@ -0,0 +1,23 @@ +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: skipper-ingress + namespace: kube-system + labels: + application: skipper-ingress +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: skipper-ingress + minReplicas: 3 + maxReplicas: 50 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 70 + - type: Resource + resource: + name: memory + targetAverageUtilization: 70 diff --git a/kubernetes/deploy/deployment/rbac.yaml b/kubernetes/deploy/deployment/rbac.yaml new file mode 100644 index 0000000000..7f651db112 --- /dev/null +++ b/kubernetes/deploy/deployment/rbac.yaml @@ -0,0 +1,108 @@ +apiVersion: policy/v1beta1 +kind: 
PodSecurityPolicy +metadata: + name: hostnetwork +spec: + hostNetwork: true + hostPorts: + - max: 10000 + min: 50 + supplementalGroups: + rule: RunAsAny + fsGroup: + rule: RunAsAny + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + seLinux: + rule: RunAsAny +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: hostnetwork-psp +rules: +- apiGroups: + - extensions + resourceNames: + - hostnetwork + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: skipper-ingress + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: skipper-ingress +rules: +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list +- apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list +- apiGroups: [""] + resources: + - namespaces + - services + - endpoints + - pods + verbs: + - get + - list +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list +- apiGroups: + - zalando.org + resources: + - routegroups + verbs: + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: skipper-ingress +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: skipper-ingress +subjects: +- kind: ServiceAccount + name: skipper-ingress + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: skipper-ingress-hostnetwork-psp + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hostnetwork-psp +subjects: +- kind: ServiceAccount + name: skipper-ingress + namespace: kube-system diff --git a/kubernetes/deploy/deployment/service.yaml b/kubernetes/deploy/deployment/service.yaml new file mode 100644 index 0000000000..557b292af8 --- /dev/null +++ 
b/kubernetes/deploy/deployment/service.yaml @@ -0,0 +1,19 @@ +kind: Service +apiVersion: v1 +metadata: + name: skipper-ingress + namespace: kube-system + labels: + application: skipper-ingress + annotations: + prometheus.io/path: /metrics + prometheus.io/port: "9911" + prometheus.io/scrape: "true" +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 9999 + protocol: TCP + selector: + application: skipper-ingress diff --git a/kubernetes/east-west-usage/index.html b/kubernetes/east-west-usage/index.html new file mode 100644 index 0000000000..94f6605514 --- /dev/null +++ b/kubernetes/east-west-usage/index.html @@ -0,0 +1,1359 @@ + + + + + + + + + + + + + + + + + + + + + + + + + East-West aka svc-to-svc - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

East-West aka svc-to-svc

+ +

East-West Usage

+

If you run Skipper with an East-West +setup, +you can use the configured ingress also to do service-to-service +calls, bypassing your ingress loadbalancer and stay inside the +cluster. You can connect via HTTP to your application based on its +ingress configuration.

+

Example:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: demo
+  namespace: default
+spec:
+  rules:
+  - host: demo.skipper.cluster.local
+    http:
+      paths:
+      - backend:
+          service:
+            name: example
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Or as a RouteGroup:

+
apiVersion: zalando.org/v1
+kind: RouteGroup
+metadata:
+  name: demo
+  namespace: default
+spec:
+  hosts:
+  - demo.skipper.cluster.local
+  backends:
+  - name: backend
+    type: service
+    serviceName: example
+    servicePort: 80
+  defaultBackends:
+  - backendName: backend
+
+

Your clients inside the cluster should call this example with +demo.skipper.cluster.local in their host header. Example +from inside a container:

+
curl http://demo.skipper.cluster.local/
+
+

You can also use the same ingress or RouteGroup object to accept +internal and external traffic:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: demo
+  namespace: default
+spec:
+  rules:
+  - host: demo.example.com
+    http:
+      paths:
+      - backend:
+          service:
+            name: example
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+  - host: demo.skipper.cluster.local
+    http:
+      paths:
+      - backend:
+          service:
+            name: example
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Or, again, as a RouteGroup:

+
apiVersion: zalando.org/v1
+kind: RouteGroup
+metadata:
+  name: demo
+  namespace: default
+spec:
+  hosts:
+  - demo.skipper.cluster.local
+  - demo.example.com
+  backends:
+  - name: backend
+    type: service
+    serviceName: example
+    servicePort: 80
+  defaultBackends:
+  - backendName: backend
+
+

Metrics will change, because skipper stores metrics per HTTP Host +header, which changes with cluster internal calls from +demo.example.org to demo.default.skipper.cluster.local.

+

You can use all features as defined in Ingress +Usage, Filters, +Predicates via annotations as +before and also custom-routes.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/kubernetes/external-addresses/index.html b/kubernetes/external-addresses/index.html new file mode 100644 index 0000000000..2c3ea3812f --- /dev/null +++ b/kubernetes/external-addresses/index.html @@ -0,0 +1,1307 @@ + + + + + + + + + + + + + + + + + + + + + + + + + External Addresses aka External Name - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

External Addresses (External Name)

+

In Kubernetes, it is possible to define services with external names (type=ExternalName). For ingress objects, +Skipper supports these services, and generates routes from the ingress objects that reference one or more +external name service, that will have a backend pointing to the network address defined by the specified +service.

+

Route groups don’t support services of type ExternalName, but they support network backends, and even LB +backends with explicit endpoints with custom endpoint addresses. This way, it is possible to achieve the same +with route groups.

+

For both the ingress objects and the route groups, the accepted external addresses must be explicitly allowed by +listing regexp expressions of which at least one must be matched by the domain name of these addresses. The +allow list is a startup option, defined via command line flags or in the configuration file. Enforcing this +list happens only in the Kubernetes Ingress mode of Skipper.

+

Specifying allowed external names via command line flags

+

For compatibility reasons, the validation needs to be enabled with an explicit toggle:

+
skipper -kubernetes \
+-kubernetes-only-allowed-external-names \
+-kubernetes-allowed-external-name "^one[.]example[.]org$" \
+-kubernetes-allowed-external-name "^two[.]example[.]org$"
+
+

Specifying allowed external names via a config file

+

For compatibility reasons, the validation needs to be enabled with an explicit toggle:

+
kubernetes-only-allowed-external-names: true
+kubernetes-allowed-external-names:
+- ^one[.]example[.]org$
+- ^two[.]example[.]org$
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/kubernetes/ingress-backends/index.html b/kubernetes/ingress-backends/index.html new file mode 100644 index 0000000000..7aa88041b7 --- /dev/null +++ b/kubernetes/ingress-backends/index.html @@ -0,0 +1,1388 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Ingress Backends - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Kubernetes Backend Deployments

+

Kubernetes Race Condition problem

+

As described in #652, +there is a problem that exists in Kubernetes, while terminating Pods. +Terminating Pods could be graceful, but the nature of distributed +environments will show failures, because not all components in the +distributed system changed already their state. When a Pod terminates, +the controller-manager has to update the endpoints of the Kubernetes +service. Additionally Skipper has to get this endpoints +list. Skipper polls the kube-apiserver every -source-poll-timeout=<ms>, +which defaults to 3000. +Reducing this interval or implementing watch will only reduce the +timeframe, but not fix the underlying race condition.

+

Mitigation strategies can be different and the next section documents +strategies for application developers to mitigate the problem.

+

Teardown strategies

+

An application that is target of an ingress can circumvent HTTP code +504s Gateway Timeouts with these strategies:

+
    +
  1. use Pod lifecycle hooks
  2. +
  3. use a SIGTERM handler to switch readinessProbe to unhealthy and +exit later, or just wait for SIGKILL terminating the process.
  4. +
+

Pod Lifecycle Hooks

+

Kubernetes Pod Lifecycle +Hooks +in the Pod spec can have a preStop command which executes for +example a binary. The following will execute the binary sleep with +argument 20 to wait 20 seconds before terminating the containers +within the Pod:

+
lifecycle:
+  preStop:
+    exec:
+      command: ["sleep","20"]
+
+

20 seconds should be enough to fade your Pod out of the endpoints list +and Skipper’s routing table.

+

SIGTERM handling in Containers

+

An application can implement a SIGTERM handler, that changes the +readinessProbe target to unhealthy for the application +instance. This will make sure it will be deleted from the endpoints +list and from Skipper’s routing table. Similar to Pod Lifecycle +Hooks you could sleep 20 seconds and after that +terminate your application or you just wait until SIGKILL will cleanup +the instance after 60s.

+
go func() {
+    var sigs chan os.Signal
+    sigs = make(chan os.Signal, 1)
+    signal.Notify(sigs, syscall.SIGTERM)
+    for {
+        select {
+            case <-sigs:
+               healthCheck = unhealthy
+               time.Sleep(20*time.Second)
+               os.Exit(0)
+        }
+    }
+}()
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/kubernetes/ingress-controller/index.html b/kubernetes/ingress-controller/index.html new file mode 100644 index 0000000000..516fa7664e --- /dev/null +++ b/kubernetes/ingress-controller/index.html @@ -0,0 +1,3013 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Ingress Controller Deployment - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Skipper Ingress Controller

+

This documentation is meant for cluster operators and describes +how to install Skipper as Ingress-Controller in your Kubernetes +Cluster.

+

Why you should use Skipper as ingress controller?

+

Baremetal load balancers perform really well, but their configuration is +not updated frequently and most of the installations are not meant +for rapid change. With the introduction of Kubernetes this assumption is +no longer valid and there was a need for a HTTP router which supported +backend routes which changed very frequently. Skipper was initially designed +for a rapidly changing routing tree and subsequently used to implement +an ingress controller in Kubernetes.

+

Cloud load balancers scale well and can be updated frequently, but do not +provide many features. Skipper has advanced resiliency and deployment +features, which you can use to enhance your environment. For example, +ratelimiters, circuitbreakers, blue-green deployments, shadow traffic +and more.

+

Comparison with other Ingress Controllers

+

At Zalando we chose to run kube-ingress-aws-controller +with skipper ingress +as the target group. While AWS load balancers give us features +like TLS termination, automated certificate rotation, possible WAF, +and Security Groups, +the HTTP routing capabilities are very limited. Skipper’s main advantage +compared to other HTTP routers is matching and changing HTTP. Another advantage +for us and for skipper users in general is that defaults with +kube-ingress-aws-controller +just work as you would expect. +For lower latency, safety, and cost reasons you can also use Network +Load Balancer (NLB) instead of Application Load Balancer (ALB). +We tested two cases (Skipper backends were pre-scaled and not changed):

+
    +
  1. A hard switch to a cold NLB with 1 million requests per second +(RPS). A similar test with 100k RPS with ALB results in client visible +error rates and high latency percentiles.
  2. +
  3. A 6h test with 2k RPS showed regular spikes in p999 latency to more +than 100ms for ALB. NLB showed a flat p999 latency of 25-35ms for +the same workload.
  4. +
+

There are a number of other ingress controllers including +traefik, +nginx, +haproxy or +aws-alb-ingress-controller. +Why not one of these?

+

HAproxy and Nginx are well understood and +good TCP/HTTP proxies, that were built before Kubernetes. As a result, the first drawback is +their reliance on static configuration files which comes from a time when routes and their +configurations were relatively static. Secondly, the list of annotations to implement even +basic features are already quite a big list for users. Skipper was built to support dynamically +changing route configurations, which happens quite often in Kubernetes. Other advantage of +using Skipper is that we are able to easily implement automated canary deployments, +automated blue-green deployments +or shadow traffic.

+

However there are some features that have better support in aws-alb-ingress-controller, +HAproxy and nginx. For instance the sendfile() +operation. If you need to stream a large file or large amount of files, then you may want to +go for one of these options.

+

aws-alb-ingress-controller directly routes traffic to your Kubernetes services, which is +both good and bad, because it can reduce latency, but comes with the risk of depending on +kube-proxy routing. kube-proxy routing can take up to 30 seconds, ETCD ttl, for finding +pods from dead nodes. In Skipper we passively observe errors from endpoints and are able to +drop these from the load balancer members. We add these to an actively checked member pool, +which will enable endpoints if these are healthy again from skipper’s point of view. +Additionally the aws-alb-ingress-controller does not support features like ALB sharing, +or Server Name Indication which can reduce +costs. Features like path rewriting +are also not currently supported.

+

Traefik has a good community and support for Kubernetes. Skipper originates from +Project Mosaic which was started in 2015. Back then Traefik +was not yet a mature project and still had time to go before the v1.0.0 release. +Traefik also does not currently support our OpenTracing provider. +It also did not support traffic splitting when we started stackset-controller +for automated traffic switching. We have also recently done significant work on running +Skipper as API gateway within Kubernetes, which could potentially help many teams that +run many small services on Kubernetes. Skipper predicates and filters are a powerful +abstraction which can enhance the system easily.

+

Comparison with service mesh

+

Why run Skipper and not Istio, Linkerd or other +service-mesh solutions?

+

Skipper has a Kubernetes native integration, which is reliable, proven in production since +end of 2015 as of March 2019 run in 112 Kubernetes clusters at Zalando. Skipper already has +most of the features provided +by service meshes:

+
    +
  • Authentication/Authorization in +Kubernetes ingress, +and can also integrate a custom service with webhook
  • +
  • Diagnosis tools +that support latency, bandwidth throttling, random content and more.
  • +
  • Rich Metrics which + you can enable and disable in the Prometheus format.
  • +
  • Support for different Opentracing providers +including jaeger, lightstep and instana
  • +
  • Ratelimits support +with cluster ratelimits as a pending solution, which enables you to stop login attacks easily
  • +
  • Connects to endpoints directly, instead of using Kubernetes services
  • +
  • Retries requests, if the request can be safely retried, which is only the case if the error +happens on the TCP/IP connection establishment or a backend whose requests are defined as +idempotent.
  • +
  • Simple East-West Communication +which enables proper communication paths without the need of yet another tool to do service +discovery. See how to run skipper as API Gateway with East-West setup, +if you want to run this powerful setup. Kubernetes, Skipper and DNS are the service discovery +in this case.
  • +
  • Blue-green deployments + with automation if you like to use stackset-controller
  • +
  • shadow-traffic + to determine if the new version is able to handle the traffic the same as the old one
  • +
  • A simple way to do A/B tests
  • +
  • You are free to use cloud providers TLS terminations and certificate rotation, which is +reliable and secure. Employees cannot download private keys and certificates are certified +by a public CA. Many mTLS setups rely on insecure CA handling and are hard to debug in case of + failure.
  • +
  • We are happy to receive issues and pull requests in our repository, but if you need a feature +which can not be implemented upstream, you are also free to use skipper as a library and +create internal features to do whatever you want.
  • +
+

With Skipper you do not need to choose to go all-in and you are able to add features as soon +as you need or are comfortable.

+

What is an Ingress-Controller?

+

Ingress-controllers are serving http requests into a Kubernetes +cluster. Most of the time traffic will pass through ingress and go to the +Kubernetes endpoints of the respective pods. +For having a successful ingress, you need to have a DNS name pointing +to a set of stable IP addresses that act as a load balancer.

+

Skipper as ingress-controller:

+
    +
  • cloud: deploy behind the cloud load balancer
  • +
  • baremetal: deploy behind your hardware/software load balancer and have all skipper as members in one pool.
  • +
+

You would point your DNS entries to the +load balancer in front of skipper, for example automated using +external-dns.

+

Why Skipper uses EndpointSlices or Endpoints and not Services?

+

Skipper does not use the ClusterIP of Kubernetes +Services to route +traffic to the pods. Instead it uses the Endpointslices or Endpoints +API to bypass kube-proxy created iptables to remove overhead like +conntrack entries for iptables DNAT. Skipper can also reuse +connections to Pods, such that you have no overhead in establishing +connections all the time. To prevent errors on node failures, Skipper +also does automatic retries to another endpoint in case it gets a +connection refused or TLS handshake error to the endpoint. Other +reasons are future support of features like session affinity, +different load balancer algorithms or distributed loadbalancing also +known as service mesh.

+

Using EndpointSlices instead of Endpoints

+

EndpointSlices +provide the ability to +scale beyond 1000 +load balancer members in one pool.

+

To enable EndpointSlices you need to run skipper or routesrv with +-enable-kubernetes-endpointslices=true.

+

Using Services instead of Endpoints

+

While using Endpoints is the preferred way of using Skipper as an +ingress controller as described in the section above, there might be +edge cases that require the use of Kubernetes +Services instead.

+

An example of scenario where you might need to use Services is when you rely +on Istio networking features to connect multiple clusters, as the IPs of +Kubernetes Endpoints will not resolve in all cases.

+

If you find yourself in this category, you can override the default behaviour +by setting the KubernetesForceService flag to true in the Skipper.Options struct. +This will cause Skipper to create routes with BackendType=eskip.NetworkBackend instead +of BackendType=eskip.LBBackend and use the following address format: +http://<service name>.<namespace>.svc.cluster.local:<port>. See the Kubernetes Service DNS +documentation +for more information.

+

AWS deployment

+

In AWS, this could be an ALB with DNS pointing to the ALB. The ALB can +then point to an ingress-controller running on an EC2 node and uses +Kubernetes hostnetwork port specification in the Pod spec.

+

A logical overview of the traffic flow in AWS is shown in this picture:

+

logical ingress-traffic-flow

+

We described that Skipper bypasses Kubernetes Service and uses directly +endpoints for good reasons, +therefore the real traffic flow is shown in the next picture. +technical ingress-traffic-flow

+

Baremetal deployment

+

In datacenter, baremetal environments, you probably have a hardware +load balancer or some haproxy or nginx setup, that serves most of your +production traffic and DNS points to these endpoints. For example +*.ingress.example.com could point to your virtual server IPs in front +of ingress. Skippers could be used as pool members, which do the http +routing. Your load balancer of choice could have a wildcard certificate +for *.ingress.example.com and DNS for this would point to your +load balancer. You can also automate DNS records with +external-dns, +if you for example use PowerDNS as provider and have a load balancer +controller that modifies the status field in ingress to your +load balancer virtual IP.

+

ingress-traffic-flow

+

RouteSRV

+

In Kubernetes, skipper-ingress fetches ingress/routegroup configurations every 3s. With a high number of skipper pods (~100) we faced issues with the kube-apiserver. Therefore we introduced RouteSRV, which serves as a layer between the kube-apiserver and skipper-ingress, giving us more flexibility in scaling skipper-ingress without affecting the kube-apiserver

+

Kubernetes dataclient as routes source

+
  graph TD;
+      kapis(kubeapiserver) --fetches ingresses--> s(skipper);
+

Kubernetes with RouteSRV as routes source

+
  graph TD;
+  kapis(kubeapiserver) --fetches ingresses--> s(routesrv) --fetches routes--> d1(skipper1) & d2(skipper2);
+

Requirements

+

In general for one endpoint you need, a DNS A/AAAA record pointing to +one or more load balancer IPs. Skipper is best used behind this +layer 4 load balancer to route and manipulate HTTP data.

+

minimal example:

+
    +
  • layer 4 load balancer has 1.2.3.4:80 as socket for a virtual server pointing to all skipper ingress
  • +
  • *.ingress.example.com points to 1.2.3.4
  • +
  • ingress object with host entry for myapp.ingress.example.com targets a service type ClusterIP
  • +
  • service type ClusterIP has a selector that targets your Pods of your myapp deployment
  • +
+

TLS example:

+
    +
  • same as before, but you would terminate TLS on your layer 4 load balancer
  • +
  • layer 4 load balancer has 1.2.3.4:443 as socket for a virtual server
  • +
  • you can use an automated redirect for all port 80 requests to https with -kubernetes-https-redirect +and change the default redirect code with -kubernetes-https-redirect-code
  • +
+

Install Skipper as ingress-controller

+

You should have a base understanding of Kubernetes and +Ingress.

+

Prerequisites:

+
    +
  1. You should checkout the git repository to have access to the +manifests: git clone https://github.com/zalando/skipper.git
  2. +
  3. You should enter the cloned directory: cd skipper
  4. +
  5. You have to choose how to install skipper-ingress. You can install +it as a daemonset or as a deployment.
  6. +
+

Beware, in order to get traffic from the internet, we would need to +have a load balancer in front to direct all traffic to skipper. Skipper +will route the traffic based on ingress objects. The load balancer +should have a HTTP health check, that does a GET request to +/kube-system/healthz on all Kubernetes worker nodes. This method is +simple and used successfully in production. In AWS you can run +kube-ingress-aws-controller +to create these load balancers automatically based on the ingress +definition.

+

Deployment style

+

Follow the deployment style you like: daemonset or deployment.

+

Daemonset

+

We start to deploy skipper-ingress as a daemonset, use hostNetwork and +expose the TCP port 9999 on each Kubernetes worker node for incoming ingress +traffic.

+

To deploy all manifests required for the daemonset style, you can +run:

+
kubectl create -f docs/kubernetes/deploy/daemonset
+
+
# cat docs/kubernetes/deploy/daemonset/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: skipper-ingress
+  namespace: kube-system
+  labels:
+    application: skipper-ingress
+    version: v0.10.180
+    component: ingress
+spec:
+  selector:
+    matchLabels:
+      application: skipper-ingress
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      name: skipper-ingress
+      labels:
+        application: skipper-ingress
+        version: v0.11.1
+        component: ingress
+    spec:
+      priorityClassName: system-node-critical
+      serviceAccountName: skipper-ingress
+      tolerations:
+      - key: dedicated
+        operator: Exists
+      nodeSelector:
+        kubernetes.io/role: worker
+      hostNetwork: true
+      containers:
+      - name: skipper-ingress
+        image: registry.opensource.zalan.do/teapot/skipper:v0.12.0
+        ports:
+        - name: ingress-port
+          containerPort: 9999
+          hostPort: 9999
+        - name: metrics-port
+          containerPort: 9911
+        args:
+          - "skipper"
+          - "-kubernetes"
+          - "-kubernetes-in-cluster"
+          - "-kubernetes-path-mode=path-prefix"
+          - "-address=:9999"
+          - "-wait-first-route-load"
+          - "-proxy-preserve-host"
+          - "-serve-host-metrics"
+          - "-enable-ratelimits"
+          - "-experimental-upgrade"
+          - "-metrics-exp-decay-sample"
+          - "-reverse-source-predicate"
+          - "-lb-healthcheck-interval=3s"
+          - "-metrics-flavour=codahale,prometheus"
+          - "-enable-connection-metrics"
+          - "-max-audit-body=0"
+          - "-histogram-metric-buckets=.01,.025,.05,.075,.1,.2,.3,.4,.5,.75,1,2,3,4,5,7,10,15,20,30,60,120,300,600"
+        resources:
+          requests:
+            cpu: 150m
+            memory: 150Mi
+        readinessProbe:
+          httpGet:
+            path: /kube-system/healthz
+            port: 9999
+          initialDelaySeconds: 5
+          timeoutSeconds: 5
+        securityContext:
+          readOnlyRootFilesystem: true
+          runAsNonRoot: true
+          runAsUser: 1000
+
+

Please check, that you are using the latest +release, and do +not use latest tag in production. While skipper is quite stable as +library and proxy, there is ongoing development to make skipper more +safe, increase visibility, fix issues that lead to incidents and add +features.

+

Deployment

+

We start to deploy skipper-ingress as a deployment with an HPA, use +hostNetwork and expose the TCP port 9999 on each Kubernetes worker +node for incoming ingress traffic.

+

To deploy all manifests required for the deployment style, you can +run:

+
kubectl create -f docs/kubernetes/deploy/deployment
+
+

Now, let’s see what we have just deployed. +This will create serviceaccount, PodSecurityPolicy and RBAC rules such that +skipper-ingress is allowed to listen on the hostnetwork and poll +ingress resources.

+
# cat docs/kubernetes/deploy/deployment/rbac.yaml
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: hostnetwork
+spec:
+  hostNetwork: true
+  hostPorts:
+  - max: 10000
+    min: 50
+  supplementalGroups:
+    rule: RunAsAny
+  fsGroup:
+    rule: RunAsAny
+  runAsUser:
+    # Require the container to run without root privileges.
+    rule: 'MustRunAsNonRoot'
+  seLinux:
+    rule: RunAsAny
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: hostnetwork-psp
+rules:
+- apiGroups:
+  - extensions
+  resourceNames:
+  - hostnetwork
+  resources:
+  - podsecuritypolicies
+  verbs:
+  - use
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: skipper-ingress
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: skipper-ingress
+rules:
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses
+  verbs:
+  - get
+  - list
+- apiGroups:
+    - extensions
+  resources:
+    - ingresses
+  verbs:
+    - get
+    - list
+- apiGroups: [""]
+  resources:
+    - namespaces
+    - services
+    - endpoints
+    - pods
+  verbs:
+    - get
+    - list
+- apiGroups:
+    - discovery.k8s.io
+  resources:
+    - endpointslices
+  verbs:
+    - get
+    - list
+- apiGroups:
+  - zalando.org
+  resources:
+  - routegroups
+  verbs:
+  - get
+  - list
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: skipper-ingress
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: skipper-ingress
+subjects:
+- kind: ServiceAccount
+  name: skipper-ingress
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: skipper-ingress-hostnetwork-psp
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: hostnetwork-psp
+subjects:
+- kind: ServiceAccount
+  name: skipper-ingress
+  namespace: kube-system
+
+

The next file creates deployment with all options passed to +skipper, that you should care in a basic production setup.

+
# cat docs/kubernetes/deploy/deployment/deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: skipper-ingress
+  namespace: kube-system
+  labels:
+    application: skipper-ingress
+    version: v0.11.40
+    component: ingress
+spec:
+  strategy:
+    rollingUpdate:
+      maxSurge: 0
+  selector:
+    matchLabels:
+      application: skipper-ingress
+  template:
+    metadata:
+      labels:
+        application: skipper-ingress
+        version: v0.11.40
+        component: ingress
+    spec:
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            - labelSelector:
+                matchExpressions:
+                - key: application
+                  operator: In
+                  values:
+                  - skipper-ingress
+              topologyKey: kubernetes.io/hostname
+      priorityClassName: system-cluster-critical
+      serviceAccountName: skipper-ingress
+      nodeSelector:
+        kubernetes.io/role: worker
+      dnsPolicy: ClusterFirstWithHostNet
+      hostNetwork: true
+      containers:
+      - name: skipper-ingress
+        image: registry.opensource.zalan.do/teapot/skipper:v0.12.0
+        ports:
+        - name: ingress-port
+          containerPort: 9999
+          hostPort: 9999
+        args:
+          - "skipper"
+          - "-kubernetes"
+          - "-kubernetes-in-cluster"
+          - "-kubernetes-path-mode=path-prefix"
+          - "-address=:9999"
+          - "-wait-first-route-load"
+          - "-proxy-preserve-host"
+          - "-serve-host-metrics"
+          - "-disable-metrics-compat"
+          - "-enable-profile"
+          - "-enable-ratelimits"
+          - "-experimental-upgrade"
+          - "-metrics-exp-decay-sample"
+          - "-reverse-source-predicate"
+          - "-lb-healthcheck-interval=3s"
+          - "-metrics-flavour=prometheus"
+          - "-enable-connection-metrics"
+          - "-max-audit-body=0"
+          - "-histogram-metric-buckets=.0001,.00025,.0005,.00075,.001,.0025,.005,.0075,.01,.025,.05,.075,.1,.2,.3,.4,.5,.75,1,2,3,4,5,7,10,15,20,30,60,120,300,600"
+          - "-expect-continue-timeout-backend=30s"
+          - "-keepalive-backend=30s"
+          - "-max-idle-connection-backend=0"
+          - "-response-header-timeout-backend=1m"
+          - "-timeout-backend=1m"
+          - "-tls-timeout-backend=1m"
+          - "-close-idle-conns-period=20s"
+          - "-idle-timeout-server=62s"
+          - "-read-timeout-server=5m"
+          - "-write-timeout-server=60s"
+          - '-default-filters-prepend=enableAccessLog(4,5) -> lifo(2000,20000,"3s")'
+        resources:
+          limits:
+            cpu: "4"
+            memory: "1Gi"
+          requests:
+            cpu: "4"
+            memory: "1Gi"
+        readinessProbe:
+          httpGet:
+            path: /kube-system/healthz
+            port: 9999
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+        securityContext:
+          readOnlyRootFilesystem: true
+          runAsNonRoot: true
+          runAsUser: 1000
+
+

This will deploy a HorizontalPodAutoscaler to scale skipper-ingress +based on load.

+
# cat docs/kubernetes/deploy/deployment/hpa.yaml
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: skipper-ingress
+  namespace: kube-system
+  labels:
+    application: skipper-ingress
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: skipper-ingress
+  minReplicas: 3
+  maxReplicas: 50
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      targetAverageUtilization: 70
+  - type: Resource
+    resource:
+      name: memory
+      targetAverageUtilization: 70
+
+

The next file will group skipper-ingress with a service, such that internal +clients can access skipper via Kubernetes service.

+
# cat docs/kubernetes/deploy/deployment/service.yaml
+kind: Service
+apiVersion: v1
+metadata:
+  name: skipper-ingress
+  namespace: kube-system
+  labels:
+    application: skipper-ingress
+  annotations:
+    prometheus.io/path: /metrics
+    prometheus.io/port: "9911"
+    prometheus.io/scrape: "true"
+spec:
+  type: ClusterIP
+  ports:
+    - port: 80
+      targetPort: 9999
+      protocol: TCP
+  selector:
+    application: skipper-ingress
+
+

Test your skipper setup

+

We now deploy a simple demo application serving html:

+
# cat docs/kubernetes/deploy/demo/deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: skipper-demo
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      application: skipper-demo
+  template:
+    metadata:
+      labels:
+        application: skipper-demo
+    spec:
+      containers:
+      - name: skipper-demo
+        image: registry.opensource.zalan.do/teapot/skipper:v0.12.0
+        args:
+          - "skipper"
+          - "-inline-routes"
+          - "* -> inlineContent(\"<body style='color: white; background-color: green;'><h1>Hello!</h1>\") -> <shunt>"
+        ports:
+        - containerPort: 9090
+
+

We deploy a service type ClusterIP that we will select from ingress:

+
# cat docs/kubernetes/deploy/demo/svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: skipper-demo
+  labels:
+    application: skipper-demo
+spec:
+  type: ClusterIP
+  ports:
+    - port: 80
+      protocol: TCP
+      targetPort: 9090
+      name: external
+  selector:
+    application: skipper-demo
+
+

To deploy the demo application, you have to run:

+
kubectl create -f docs/kubernetes/deploy/demo/
+
+

Now we have a skipper-ingress running as daemonset or deployment +exposing the TCP port 9999 on each worker node, which has a running +skipper-ingress instance, a backend application running with 2 +replicas that serves some html on TCP port 9090, and we expose a +cluster service on TCP port 80. Besides skipper-ingress, the deployment +and service cannot be reached from outside the cluster. Now we expose +the application with Ingress to the external network:

+
# cat demo-ing.yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: skipper-demo
+spec:
+  rules:
+  - host: skipper-demo.<mydomain.org>
+    http:
+      paths:
+      - backend:
+          service:
+            name: skipper-demo
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

To deploy this ingress, you have to run:

+
kubectl create -f demo-ing.yaml
+
+

Skipper will configure itself for the given ingress, such that you can test doing:

+
curl -v -H"Host: skipper-demo.<mydomain.org>" http://<nodeip>:9999/
+
+

The next question you may ask is: how to expose this to your customers?

+

The answer depends on your setup and complexity requirements. In the +simplest case you could add one A record in your DNS *.<mydomain.org> +to your frontend load balancer IP that directs all traffic from *.<mydomain.org> +to all Kubernetes worker nodes on TCP port 9999. The load balancer +health check should make sure, that only nodes with ready skipper-ingress +instances will get traffic.

+

A more complex setup we use in production and can be done with +something that configures your frontend load balancer, for example +kube-aws-ingress-controller, +and your DNS, external-dns +automatically.

+

Multiple skipper deployments

+

If you want to split, for example, internal and public traffic, it +might be a good choice to split your ingress deployments. Skipper has +the flag --kubernetes-ingress-class=<regexp> to only select ingress +objects that have the annotation kubernetes.io/ingress.class set to +something that is matched by <regexp>. Skipper will only create +routes for ingress objects with its annotation or ingress objects +that do not have this annotation.

+

The default ingress class is skipper, if not set. You have to create +your ingress objects with the annotation +kubernetes.io/ingress.class: skipper to make sure only skipper will +serve the traffic.

+

Example ingress:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    kubernetes.io/ingress.class: skipper
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Scoping Skipper Deployments to a Single Namespace

+

In some instances you might want skipper to only watch for ingress objects +created in a single namespace. This can be achieved by using +kubernetes-namespace=<string> where <string> is the Kubernetes namespace. +Specifying this option forces Skipper to look at the namespace ingresses +endpoint rather than the cluster-wide ingresses endpoint.

+

By default this value is an empty string ("") and will scope the skipper +instance to be cluster-wide, watching all Ingress objects across all namespaces.

+

Helm-based deployment

+

Helm calls itself the package manager for Kubernetes and therefore takes care of the deployment of whole applications including resources like services, configurations and so on.

+

Skipper is also available as community contributed Helm chart in the public quay.io registry. +The latest packaged release can be found here. +The source code is available at GitHub.

+

The chart includes resource definitions for the following use cases:

+ +

As this chart is not maintained by the Skipper developers and is still under development only the basic deployment workflow is covered here. +Check the GitHub repository for all details.

+

To be able to deploy the chart you will need the following components:

+
    +
  • helm CLI (Install guide here)
  • +
  • Helm registry plugin (available here)
  • +
+

If your environment is set up correctly you should be able to run helm version --client and helm registry version quay.io and get some information about your tooling without any error.

+

It is possible to deploy the chart without any further configuration like this:

+
helm registry upgrade quay.io/baez/skipper -- \
+    --install \
+    --wait \
+    "your release name e.g. skipper"
+
+ +

The --wait switch can be omitted as it only takes care that Helm is waiting until the chart is completely deployed (meaning all resources are created).

+

To update the deployment to a newer version the same command can be used.

+

If you have RBAC enabled in your Kubernetes instance you don’t have to create all the previously described resources on your own but you can let Helm create them by simply adding one more switch:

+
helm registry upgrade quay.io/baez/skipper -- \
+    --install \
+    --wait \
+    --set rbac.create=true \
+    "your release name e.g. skipper"
+
+ +

There are some more options available for customization of the chart. +Check the repository if you need more configuration possibilities.

+

Run as API Gateway with East-West setup

+

East-West means cluster internal service-to-service communication. +For this you need to resolve DNS to skipper for one or more additional +domains of your choice. When Ingress or +RouteGroup objects specify such domains Skipper +will add the configured predicates.

+

Skipper

+

To enable the East-West in skipper, you need to run skipper with +-kubernetes-east-west-range-domains and +-kubernetes-east-west-range-predicates configuration flags. Check the +East West Range feature. +Skipper will analyze all routes from Kubernetes objects, and the +identified East-West routes will have the specified predicates appended.

+

For example, for running skipper with the skipper.cluster.local +domain, and setting East-West routes to accept just internal traffic, +use the following config:

+
skipper \
+  -kubernetes-east-west-range-domains="skipper.cluster.local" \
+  -kubernetes-east-west-range-predicates='ClientIP("10.2.0.0/16")'
+
+

It assumes 10.2.0.0/16 is your PODs’ CIDR; you have to change it +according to your environment.

+

You need also to have a kubernetes service type ClusterIP and write +down the IP (p.e. 10.3.11.28), which you will need in CoreDNS setup.

+

CoreDNS

+

You can create the DNS records with the template plugin from CoreDNS.

+

Corefile example: +

.:53 {
+    errors
+    health
+    kubernetes cluster.local in-addr.arpa ip6.arpa {
+        pods insecure
+        upstream
+        fallthrough in-addr.arpa ip6.arpa
+    }
+    template IN A skipper.cluster.local  {
+      match "^.*[.]skipper[.]cluster[.]local"
+      answer "{{ .Name }} 60 IN A 10.3.11.28"
+      fallthrough
+    }
+    prometheus :9153
+    proxy . /etc/resolv.conf
+    cache 30
+    reload
+}
+

+

Usage

+

If the setup is correct, skipper will protect the following ingress +example with the ClientIP predicate:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: demo
+  namespace: default
+spec:
+  rules:
+  - host: demo.skipper.cluster.local
+    http:
+      paths:
+      - backend:
+          service:
+            name: example
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Your clients inside the cluster should call this example with +demo.skipper.cluster.local in their host header. Example +from inside a container:

+
curl demo.skipper.cluster.local
+
+

Skipper won’t accept traffic from any IP outside of the configured +network CIDR.

+
+

Note

+

Depending on your environment, you might want to allow traffic not +just from the PODs’ CIDR, but also from your nodes’ CIDR. When doing +so, pay attention not to allow traffic from your LoadBalancer +and, as a consequence, external traffic. You can use different +combinations of predicates like ClientIP and SourceFromLast to +achieve the desired protection.

+
+

Running with Cluster Ratelimits

+

Cluster ratelimits require a communication exchange method to build a +skipper swarm to have a shared knowledge about the requests passing +all skipper instances. To enable this feature you need to add command +line option -enable-swarm and -enable-ratelimits. +The rest depends on the implementation, that can be:

+ +

Redis based

+

Additionally you have to add -swarm-redis-urls to skipper +args:. For example: -swarm-redis-urls=skipper-redis-0.skipper-redis.kube-system.svc.cluster.local:6379,skipper-redis-1.skipper-redis.kube-system.svc.cluster.local:6379.

+

Running skipper with hostNetwork in kubernetes will not be able to +resolve redis hostnames as shown in the example, if skipper does not +have dnsPolicy: ClusterFirstWithHostNet in it’s Pod spec, see also +DNS policy in the official Kubernetes documentation.

+

This setup is considered experimental and should be carefully tested +before running it in production.

+

Example redis statefulset with headless service:

+
apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  labels:
+    application: skipper-redis
+    version: v6.2.4
+  name: skipper-redis
+  namespace: kube-system
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      application: skipper-redis
+  serviceName: skipper-redis
+  template:
+    metadata:
+      labels:
+        application: skipper-redis
+        version: v6.2.4
+    spec:
+      containers:
+      - image: registry.opensource.zalan.do/library/redis-6-alpine:6-alpine-20210712
+        name: skipper-redis
+        ports:
+        - containerPort: 6379
+          protocol: TCP
+        readinessProbe:
+          exec:
+            command:
+            - redis-cli
+            - ping
+          failureThreshold: 3
+          initialDelaySeconds: 10
+          periodSeconds: 60
+          successThreshold: 1
+          timeoutSeconds: 1
+        resources:
+          limits:
+            cpu: 100m
+            memory: 100Mi
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      schedulerName: default-scheduler
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    application: skipper-redis
+  name: skipper-redis
+  namespace: kube-system
+spec:
+  clusterIP: None
+  ports:
+  - port: 6379
+    protocol: TCP
+    targetPort: 6379
+  selector:
+    application: skipper-redis
+  type: ClusterIP
+
+

SWIM based

+

SWIM +is a “Scalable Weakly-consistent Infection-style Process Group +Membership Protocol”, which is very interesting for example to use for +cluster ratelimits. This setup is not considered stable enough to run +in production, yet.

+

Additionally you have to add the following command line flags to +skipper’s container spec args::

+
-swarm-port=9990
+-swarm-label-selector-key=application
+-swarm-label-selector-value=skipper-ingress
+-swarm-leave-timeout=5s
+-swarm-max-msg-buffer=4194304
+-swarm-namespace=kube-system
+
+

and open another port in Kubernetes and your Firewall settings to make +the communication work with TCP and UDP to the specified swarm-port:

+
- containerPort: 9990
+  hostPort: 9990
+  name: swarm-port
+  protocol: TCP
+
+

Upgrades

+

Please always read the announcements of the vX.Y.0 +release page, +because these will document in case we break something in a backwards non +compatible way. Most of the time it will be safe to deploy minor +version updates, but better to know in advance if something could +break.

+

=v0.14.0

+

Kubernetes dataclient removes support for ingress v1beta1. +What does it mean for you?

+
    +
  1. If you run with enabled -kubernetes-ingress-v1, you won’t need to + do anything and you can safely delete the flag while updating to + >=0.14.0.
  2. +
  3. If you use skipper as library and pass KubernetesIngressV1: true + via kubernetes.Options into kubernetes.New(), then you won’t need to + do anything and you can safely delete passing the option while updating to + >=0.14.0.
  4. +
  5. If you use Ingress v1beta1 and run Kubernetes cluster version that + does not support ingress v1, then you can’t update skipper to + >=0.14.0, before you upgrade your Kubernetes cluster.
  6. +
  7. If you use Ingress v1beta1 and run a Kubernetes cluster version that + supports ingress v1, then you need to allow skipper to access the new + APIs with a changed RBAC. See the guide below.
  8. +
+

If you are in case 4., you have to apply a change in your RBAC, please +check the diff or the full rendered file.

+

Diff view (same for deployment and daemonset): +

diff --git docs/kubernetes/deploy/deployment/rbac.yaml docs/kubernetes/deploy/deployment/rbac.yaml
+index 361f3789..c0e448a4 100644
+--- docs/kubernetes/deploy/deployment/rbac.yaml
++++ docs/kubernetes/deploy/deployment/rbac.yaml
+@@ -37,11 +37,18 @@ metadata:
+   name: skipper-ingress
+   namespace: kube-system
+ ---
+-apiVersion: rbac.authorization.k8s.io/v1beta1
++apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+   name: skipper-ingress
+ rules:
++- apiGroups:
++  - networking.k8s.io
++  resources:
++  - ingresses
++  verbs:
++  - get
++  - list
+ - apiGroups:
+     - extensions
+   resources:
+@@ -66,7 +73,7 @@ rules:
+   - get
+   - list
+ ---
+-apiVersion: rbac.authorization.k8s.io/v1beta1
++apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+   name: skipper-ingress
+@@ -79,7 +86,7 @@ subjects:
+   name: skipper-ingress
+   namespace: kube-system
+ ---
+-apiVersion: rbac.authorization.k8s.io/v1beta1
++apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+   name: skipper-ingress-hostnetwork-psp
+

+

Full rendered RBAC files (same for deployment and daemonset):

+
# cat docs/kubernetes/deploy/deployment/rbac.yaml
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: hostnetwork
+spec:
+  hostNetwork: true
+  hostPorts:
+  - max: 10000
+    min: 50
+  supplementalGroups:
+    rule: RunAsAny
+  fsGroup:
+    rule: RunAsAny
+  runAsUser:
+    # Require the container to run without root privileges.
+    rule: 'MustRunAsNonRoot'
+  seLinux:
+    rule: RunAsAny
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: hostnetwork-psp
+rules:
+- apiGroups:
+  - extensions
+  resourceNames:
+  - hostnetwork
+  resources:
+  - podsecuritypolicies
+  verbs:
+  - use
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: skipper-ingress
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: skipper-ingress
+rules:
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses
+  verbs:
+  - get
+  - list
+- apiGroups:
+    - extensions
+  resources:
+    - ingresses
+  verbs:
+    - get
+    - list
+- apiGroups: [""]
+  resources:
+    - namespaces
+    - services
+    - endpoints
+    - pods
+  verbs:
+    - get
+    - list
+- apiGroups:
+    - discovery.k8s.io
+  resources:
+    - endpointslices
+  verbs:
+    - get
+    - list
+- apiGroups:
+  - zalando.org
+  resources:
+  - routegroups
+  verbs:
+  - get
+  - list
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: skipper-ingress
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: skipper-ingress
+subjects:
+- kind: ServiceAccount
+  name: skipper-ingress
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: skipper-ingress-hostnetwork-psp
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: hostnetwork-psp
+subjects:
+- kind: ServiceAccount
+  name: skipper-ingress
+  namespace: kube-system
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/kubernetes/ingress-usage/index.html b/kubernetes/ingress-usage/index.html new file mode 100644 index 0000000000..9650ad56e3 --- /dev/null +++ b/kubernetes/ingress-usage/index.html @@ -0,0 +1,3374 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Ingress Usage - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Skipper Ingress Usage

+

This documentation is meant for people deploying to Kubernetes +Clusters and describes how to use Ingress and the low level and high level +features Skipper provides.

+

RouteGroups, a relatively new feature, also +support each of these features, with an alternative format that +supports them in a more native way. The documentation contains a +section with +mapping +Ingress to RouteGroups.

+

Skipper Ingress Annotations

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Annotationexample datausage
zalando.org/backend-weights{"my-app-1": 80, "my-app-2": 20}blue-green deployments
zalando.org/skipper-filterconsecutiveBreaker(15)arbitrary filters
zalando.org/skipper-predicateQueryParam("version", "^alpha$")arbitrary predicates
zalando.org/skipper-routesMethod("OPTIONS") -> status(200) -> <shunt>extra custom routes
zalando.org/ratelimitratelimit(50, "1m")deprecated, use zalando.org/skipper-filter instead
zalando.org/skipper-ingress-redirect"true"change the default HTTPS redirect behavior for specific ingresses (true/false)
zalando.org/skipper-ingress-redirect-code301change the default HTTPS redirect code for specific ingresses
zalando.org/skipper-loadbalancerconsistentHashdefaults to roundRobin, see available choices
zalando.org/skipper-backend-protocolfastcgi(experimental) defaults to http, see available choices
zalando.org/skipper-ingress-path-modepath-prefix(deprecated) please use Ingress version 1 pathType option, which defaults to ImplementationSpecific and does not change the behavior. Skipper’s path-mode defaults to kubernetes-ingress, see available choices, to change the default use -kubernetes-path-mode.
+

Supported Service types

+

Ingress backend definitions are services, which have different +service types.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Service typesupportedworkaround
ClusterIPyes
NodePortyes
ExternalNameyes
LoadBalancernoit should not, because Kubernetes cloud-controller-manager will maintain it
+

HTTP Host header routing

+

HTTP host header is defined within the rules host section and this +route will match by http Host: app-default.example.org and route to +endpoints selected by the Kubernetes service app-svc on port 80.

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

To have 2 routes with different Host headers serving the same +backends, you have to specify 2 entries in the rules section, as +Kubernetes defined the ingress spec. This is often used in cases of +migrations from one domain to another one or migrations to or from +bare metal datacenters to cloud providers or inter cloud or intra +cloud providers migrations. Examples are AWS account migration, AWS to +GCP migration, GCP to bare metal migration or bare metal to Alibaba +Cloud migration.

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+  - host: foo.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Multiple Ingresses defining the same route

+
+

Warning

+

If multiple ingresses define the same host and the same predicates, traffic routing may become non-deterministic.

+
+

Consider the following two ingresses which have the same hostname and therefore +overlap. In skipper the routing of this is currently undefined as skipper +doesn’t pick one over the other, but just creates routes (possibly overlapping) +for each of the ingresses.

+

In this example (taken from the issues we saw in production clusters) one +ingress points to a service with no endpoints and the other to a service with +endpoints. (Most likely service-x was renamed to service-x-live and the old +ingress was forgotten).

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: service-x
+spec:
+  rules:
+  - host: service-x.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: service-x # this service has 0 endpoints
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: service-x-live
+spec:
+  rules:
+  - host: service-x.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: service-x-live
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Ingress path handling

+

Skipper supports all Kubernetes +path-types +as documented in Kubernetes documentation.

+

Ingress paths can be interpreted in five different modes:

+
    +
  1. pathType: Prefix results in PathSubtree predicate)
  2. +
  3. pathType: Exact results in Path predicate)
  4. +
  5. pathType: ImplementationSpecific
  6. +
  7. based on the kubernetes ingress specification
  8. +
  9. as plain regular expression
  10. +
  11. as a path prefix (same as pathType: Prefix and results in PathSubtree)
  12. +
+

The default is 3.1, the kubernetes ingress mode. It can be changed by a startup option +to any of the other modes, and the individual ingress rules can also override the +default behavior with the zalando.org/skipper-ingress-path-mode annotation. You can +also set for each path rule a different Kubernetes pathType like Prefix and Exact.

+

E.g.:

+
zalando.org/skipper-ingress-path-mode: path-prefix
+
+ +

Kubernetes ingress specification base path

+

By default, the ingress path mode is set to kubernetes-ingress, +which is interpreted as a regular expression with a mandatory leading +/, and is automatically prepended by a ^ control character, +enforcing that the path has to be at the start of the incoming request +path.

+

Plain regular expression

+

When the path mode is set to path-regexp, the ingress path is interpreted similar +to the default kubernetes ingress specification way, but is not prepended by the ^ +control character.

+

Path prefix

+

When the path mode is set to path-prefix, the ingress path is not a regular +expression. As an example, /foo/bar will match /foo/bar or /foo/bar/baz, but +won’t match /foo/barooz.

+

When PathPrefix is used, the path matching becomes deterministic when +a request could match more than one ingress routes otherwise.

+

In PathPrefix mode, when a Path or PathSubtree predicate is set in an +annotation, the predicate in the annotation takes precedence over the normal ingress +path.

+

Filters and Predicates

+
    +
  • Filters can manipulate http data, which is not possible in the ingress spec.
  • +
  • Predicates change the route matching, beyond normal ingress definitions
  • +
+

This example shows how to add predicates and filters:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-predicate: predicate1 && predicate2 && .. && predicateN
+    zalando.org/skipper-filter: filter1 -> filter2 -> .. -> filterN
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Custom Routes

+

Please consider using RouteGroups, instead of custom +routes!

+

Custom routes is a way of extending the default routes configured for +an ingress resource.

+

Sometimes you just want to return a header, redirect or even static +html content. You can return from skipper without doing a proxy call +to a backend, if you end your filter chain with <shunt>. The use of +<shunt> is recommended in combination with the status() filter, to +not respond with the default http code, which defaults to 404. To +match your custom route with higher priority than your ingress you +also have to add another predicate, for example the Method(“GET”) +predicate to match the route with higher +priority.

+

Custom routes specified in ingress will always add the Host() +predicate to match the host header specified in +the ingress rules:. If there is a path: definition in your +ingress, then it will be based on the skipper command line parameter +-kubernetes-path-mode set one of these predicates:

+ +

If you have a path: value defined in your ingress resource, a custom +route is not allowed to use Path() nor PathSubtree() predicates. +You will get an error in Skipper logs, similar to:

+
[APP]time="2019-01-02T13:30:16Z" level=error msg="Failed to add route having 2 path routes: Path(\"/foo/bar\") -> inlineContent(\"custom route\") -> status(200) -> <shunt>"
+
+

Redirects

+

Overwrite the current ingress with a redirect

+

Sometimes you want to +overwrite the current ingress with a redirect to a nicer downtime +page.

+

The following example shows how to create a temporary redirect with status +code 307 to https://outage.example.org. No requests will pass to your +backend defined, because the created route from the annotation +zalando.org/skipper-routes will get 3 Predicates +Host("^app-default[.]example[.]org$") && Path("/") && PathRegexp("/"), +instead of the 2 Predicates +Host("^app-default[.]example[.]org$") && Path("/"), that will be +created for the ingress backend.

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: app
+  namespace: default
+  annotations:
+    zalando.org/skipper-routes: |
+       redirect_app_default: PathRegexp("/") -> redirectTo(307, "https://outage.example.org/") -> <shunt>;
+spec:
+  rules:
+  - host: "app-default.example.org"
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+
+

Redirect a specific path from ingress

+

Sometimes you want to have a redirect from +http://app-default.example.org/myredirect to +https://somewhere.example.org/another/path.

+

The following example shows how to create a permanent redirect with status +code 308 from http://app-default.example.org/myredirect to +https://somewhere.example.org/another/path, other paths will not be +redirected and passed to the backend selected by serviceName=app-svc and +servicePort=80:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: app
+  namespace: default
+  annotations:
+    zalando.org/skipper-routes: |
+       redirect_app_default: PathRegexp("/myredirect") -> redirectTo(308, "https://somewhere.example.org/another/path") -> <shunt>;
+spec:
+  rules:
+  - host: "app-default.example.org"
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+
+

Return static content

+

The following example sets a response header X: bar, a response body +<html><body>hello</body></html> and respond from the ingress +directly with a HTTP status code 200:

+
zalando.org/skipper-routes: |
+  Path("/") -> setResponseHeader("X", "bar") -> inlineContent("<html><body>hello</body></html>") -> status(200) -> <shunt>
+
+

Keep in mind that you need a valid backend definition to backends +which are available, otherwise Skipper would not accept the entire +route definition from the ingress object for safety reasons.

+

CORS example

+

This example shows how to add a custom route for handling OPTIONS requests.

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-routes: |
+      Method("OPTIONS") ->
+      setResponseHeader("Access-Control-Allow-Origin", "*") ->
+      setResponseHeader("Access-Control-Allow-Methods", "GET, OPTIONS") ->
+      setResponseHeader("Access-Control-Allow-Headers", "Authorization") ->
+      status(200) -> <shunt>
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

This will generate a custom route for the ingress which looks like this:

+
Host(/^app-default[.]example[.]org$/) && Method("OPTIONS") ->
+  setResponseHeader("Access-Control-Allow-Origin", "*") ->
+  setResponseHeader("Access-Control-Allow-Methods", "GET, OPTIONS") ->
+  setResponseHeader("Access-Control-Allow-Headers", "Authorization") ->
+  status(200) -> <shunt>
+
+

Multiple routes

+

You can also set multiple routes, but you have to set the names of the +route as defined in eskip:

+
zalando.org/skipper-routes: |
+  routename1: Path("/") -> clientRatelimit(2, "1h") -> inlineContent("A") -> status(200) -> <shunt>;
+  routename2: Path("/foo") -> clientRatelimit(5, "1h") -> inlineContent("B") -> status(200) -> <shunt>;
+
+

Make sure the ; semicolon is used to terminate the routes, if you +use multiple routes definitions.

+

Disclaimer: This feature works only with having different Path* +predicates in ingress, if there are no paths rules defined. For +example this will not work:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: skipper-ingress
+  annotations:
+    kubernetes.io/ingress.class: skipper
+    zalando.org/skipper-routes: |
+       redirect1: Path("/foo/") -> redirectTo(308, "/bar/") -> <shunt>;
+spec:
+  rules:
+  - host: foo.bar
+    http:
+      paths:
+      - path: /something
+        pathType: Prefix
+        backend:
+          service:
+            name: something
+            port:
+              number: 80
+      - path: /else
+        pathType: Prefix
+        backend:
+          service:
+            name: else
+            port:
+              number: 80
+
+

A possible solution is to use skipper’s RouteGroups.

+

Filters - Basic HTTP manipulations

+

HTTP manipulations are done by using skipper filters. Changes can be +done in the request path, meaning request to your backend or in the +response path to the client, which made the request.

+

The following examples can be used within zalando.org/skipper-filter +annotation.

+

Add a request Header

+

Add an HTTP header in the request path to your backend.

+
setRequestHeader("X-Foo", "bar")
+
+ +

Add a response Header

+

Add an HTTP header in the response path of your clients.

+
setResponseHeader("X-Foo", "bar")
+
+ +

Enable compression

+

Compress responses with accepted encoding (more details here).

+
compress() // compress all valid MIME types
+compress("text/html") // only compress HTML files
+compress(11, "text/html") // control the level of compression, 1 = fastest, 11 = best compression (fallback to 9 for gzip), 0 = no compression
+
+ +

Set the Path

+

Change the path in the request path to your backend to /newPath/.

+
setPath("/newPath/")
+
+ +

Modify Path

+

Modify the path in the request path from /api/foo to your backend to /foo.

+
modPath("^/api/", "/")
+
+ +

Set the Querystring

+

Set the Querystring in the request path to your backend to ?text=godoc%20skipper.

+
setQuery("text", "godoc skipper")
+
+ +

Redirect

+

Create a redirect with HTTP code 301 to https://foo.example.org/.

+
redirectTo(301, "https://foo.example.org/")
+
+ +

Cookies

+

Set a Cookie in the request path to your backend.

+
requestCookie("test-session", "abc")
+
+ +

Set a Cookie in the response path of your clients.

+
responseCookie("test-session", "abc", 31536000)
+responseCookie("test-session", "abc", 31536000, "change-only")
+
+// response cookie without HttpOnly:
+jsCookie("test-session-info", "abc-debug", 31536000, "change-only")
+
+ +

Authorization

+

Our authentication and authorization tutorial +or filter auth godoc +shows how to use filters for authorization.

+

Basic Auth

+
% htpasswd -nbm myName myPassword
+
+basicAuth("/path/to/htpasswd")
+basicAuth("/path/to/htpasswd", "My Website")
+
+ +

Bearer Token (OAuth/JWT)

+

OAuth2/JWT tokens can be validated and allowed based on different +content of the token. Please check the filter documentation for that:

+ +

There are also auth predicates, which will allow +you to match a route based on the content of a token:

+
    +
  • JWTPayloadAnyKV()
  • +
  • JWTPayloadAllKV()
  • +
+

These are not validating the tokens, which should be done separately +by the filters mentioned above.

+

Diagnosis - Throttling Bandwidth - Latency

+

For diagnosis purpose there are filters that enable you to throttle +the bandwidth or add latency. For the full list of filters see our +diag filter godoc page.

+
bandwidth(30) // incoming in kb/s
+backendBandwidth(30) // outgoing in kb/s
+backendLatency(120) // in ms
+
+ +

Filter documentation:

+ +

Flow Id to trace request flows

+

To trace request flows skipper can generate a unique Flow Id for every +HTTP request that it receives. You can then find the trace of the +request in all your access logs. Skipper sets the X-Flow-Id header to +a unique value. Read more about this in our +flowid filter +and godoc.

+
 flowId("reuse")
+
+ +

Filters - reliability features

+

Filters can modify http requests and responses. There are plenty of +things you can do with them.

+

Circuitbreaker

+

Consecutive Breaker

+

The consecutiveBreaker +filter is a breaker for the ingress route that opens if the backend failures +for the route reach a value of N (in this example N=15), where N is a +mandatory argument of the filter and there are some more optional arguments +documented.

+
consecutiveBreaker(15)
+
+ +

The ingress spec would look like this:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-filter: consecutiveBreaker(15)
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Rate Breaker

+

The rateBreaker +filter is a breaker for the ingress route that opens if the backend +failures for the route reach a value of N within a window of the last +M requests, where N (in this example 30) and M (in this example 300) +are mandatory arguments of the filter and there are some more optional arguments +documented.

+
rateBreaker(30, 300)
+
+ +

The ingress spec would look like this:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-filter: rateBreaker(30, 300)
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Ratelimits

+

There are two kind of ratelimits:

+
    +
  1. Client side ratelimits are used to slow down login enumeration +attacks, that targets your login pages. This is a security protection +for DDoS or login attacks.
  2. +
  3. Service or backend side ratelimits are used to protect your +services due to too much traffic. This can be used in an emergency +situation to make sure you calm down ingress traffic or in general if +you know how many calls per duration your backend is able to handle.
  4. +
  5. Cluster ratelimits can be enforced either on client or on service +side as described above.
  6. +
+

Ratelimits are enforced per route.

+

More details you will find in ratelimit +package +and in our ratelimit tutorial.

+

Client Ratelimits

+

The example shows 20 calls per hour per client, based on +X-Forwarded-For header or IP in case there is no X-Forwarded-For header +set, are allowed to each skipper instance for the given ingress.

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-filter: clientRatelimit(20, "1h")
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

If you need to rate limit service to service communication and +you use Authorization headers to protect your backend from your +clients, then you can pass a third parameter to group clients by “Authorization +Header”:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-filter: clientRatelimit(20, "1h", "auth")
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Service Ratelimits

+

The example shows 50 calls per minute are allowed to each skipper +instance for the given ingress.

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-filter: ratelimit(50, "1m")
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Cluster Ratelimits

+

Cluster ratelimits are eventual consistent and require the flag +-enable-swarm to be set.

+
Service
+

The example shows 50 calls per minute are allowed to pass this ingress +rule to the backend.

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-filter: clusterRatelimit("groupSvcApp", 50, "1m")
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+
Client
+

The example shows 10 calls per hour are allowed per client, +X-Forwarded-For header, to pass this ingress rule to the backend.

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-filter: clusterClientRatelimit("groupSvcApp", 10, "1h")
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Path ratelimit

+

To ratelimit a specific path use a second ingress definition like

+

apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: app-default
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: app-login
+  annotations:
+    zalando.org/skipper-predicate: Path("/login")
+    zalando.org/skipper-filter: clusterClientRatelimit("login-ratelimit", 10, "1h")
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+or use RouteGroups.

+

Shadow Traffic

+

If you want to test a new replacement of a production service with +production load, you can copy incoming requests to your new endpoint +and ignore the responses from your new backend. This can be done by +the tee() and teenf() filters.

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-filter: teenf("https://app-new.example.org")
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Predicates

+

Predicates +are influencing the route matching, which you might want to carefully +test before using it in production. This enables you to do feature +toggles or time based enabling endpoints.

+

You can use all kinds of predicates +with filters together.

+

Feature Toggle

+

Feature toggles are often implemented as query string to select a new +feature. Normally you would have to implement this in your +application, but Skipper can help you with that and you can select +routes with an ingress definition.

+

You create 2 ingresses that matches the same route, here host header +match to app-default.example.org and one ingress has a defined query +parameter to select the route to the alpha version deployment. If the +query string in the URL has version=alpha set, for example +https://app-default.example.org/mypath?version=alpha, the service +alpha-svc will get the traffic, if not prod-svc.

+

alpha-svc:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-predicate: QueryParam("version", "^alpha$")
+  name: alpha-app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: alpha-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

prod-svc:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: prod-app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: prod-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

IP Whitelisting

+

This ingress route will only allow traffic from networks 1.2.3.0/24 and 195.168.0.0/17

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-predicate: Source("1.2.3.0/24", "195.168.0.0/17")
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

A/B test

+

Implementing A/B testing is heavy. Skipper can help you to do +that. You need to have a traffic split somewhere and have your +customers sticky to either A or B flavor of your application. Most +likely people would implement using cookies. Skipper can set a +cookie with responseCookie() +in a response to the client and the +cookie predicate +can be used to match the route based on the cookie. Like this you can +have sticky sessions to either A or B for your clients. This example +shows how to have 10% traffic using A and the rest using B.

+

10% choice of setting the Cookie “flavor” to “A”:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-predicate: Traffic(.1, "flavor", "A")
+    zalando.org/skipper-filter: responseCookie("flavor", "A", 31536000)
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: a-app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Rest is setting Cookie “flavor” to “B”:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-filter: responseCookie("flavor", "B", 31536000)
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: b-app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

To be sticky, you have to create 2 ingress with predicate to match +routes with the cookie we set before. For “A” this would be:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-predicate: Cookie("flavor", /^A$/)
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: a-app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

For “B” this would be:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-predicate: Cookie("flavor", /^B$/)
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: b-app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Blue-Green deployments

+

To do blue-green deployments you have to have control over traffic +switching. Skipper gives you the opportunity to set weights to backend +services in your ingress specification. zalando.org/backend-weights +is a hash map, which key relates to the serviceName of the backend +and the value is the weight of traffic you want to send to the +particular backend. It works for more than 2 backends, but for +simplicity this example shows 2 backends, which should be the default +case for supporting blue-green deployments.

+

In the following example my-app-1 service will get 80% of the traffic +and my-app-2 will get 20% of the traffic:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: my-app
+  labels:
+    application: my-app
+  annotations:
+    zalando.org/backend-weights: |
+      {"my-app-1": 80, "my-app-2": 20}
+spec:
+  rules:
+  - host: my-app.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: my-app-1
+            port:
+              name: http
+        pathType: Prefix
+        path: /
+      - backend:
+          service:
+            name: my-app-2
+            port:
+              name: http
+        pathType: Prefix
+        path: /
+
+

For more advanced blue-green deployments, check out our stackset-controller.

+

Chaining Filters and Predicates

+

You can set multiple filters in a chain similar to the eskip format.

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-predicate: Cookie("flavor", /^B$/) && Source("1.2.3.0/24", "195.168.0.0/17")
+    zalando.org/skipper-filter: clientRatelimit(50, "10m") -> requestCookie("test-session", "abc")
+  name: app
+spec:
+  rules:
+  - host: app-default.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Controlling HTTPS redirect

+

Skipper Ingress can provide HTTP->HTTPS redirection. Enabling it and setting the status code used by default can +be done with the command line options: -kubernetes-https-redirect and -kubernetes-https-redirect-code. By using +annotations, this behavior can be overridden from the individual ingress specs for the scope of routes generated +based on these ingresses specs.

+

Annotations:

+
    +
  • zalando.org/skipper-ingress-redirect: the possible values are true or false. When the global HTTPS redirect is + disabled, the value true enables it for the current ingress. When the global redirect is enabled, the value + false disables it for the current ingress.
  • +
  • zalando.org/skipper-ingress-redirect-code: the possible values are integers 300 <= x < 400. Sets the redirect + status code for the current ingress.
  • +
+

Example:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-ingress-redirect: "true"
+    zalando.org/skipper-ingress-redirect-code: 301
+  name: app
+spec:
+  rules:
+  - host: mobile-api.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+

Load Balancer Algorithm

+

You can set the loadbalancer algorithm, which is used to find the next +endpoint for a given request with the ingress annotation +zalando.org/skipper-loadbalancer.

+

For example, for some workloads you might want to have always the same +endpoint for the same client. For this use case there is the +consistent hash algorithm, that finds for a client detected by the IP +or X-Forwarded-For header, the same backend. If the backend is not +available it would switch to another one.

+

Annotations:

+ +

Example:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    zalando.org/skipper-loadbalancer: consistentHash
+  name: app
+spec:
+  rules:
+  - host: websocket.example.org
+    http:
+      paths:
+      - backend:
+          service:
+            name: app-svc
+            port:
+              number: 80
+        pathType: ImplementationSpecific
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/kubernetes/routegroup-crd/index.html b/kubernetes/routegroup-crd/index.html new file mode 100644 index 0000000000..28b0ed3e31 --- /dev/null +++ b/kubernetes/routegroup-crd/index.html @@ -0,0 +1,1569 @@ + + + + + + + + + + + + + + + + + + + + + + + + + RouteGroup CRD Semantics - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

RouteGroup CRD Semantics

+

This document contains the semantic definition of the RouteGroup CRD. For more information, see the route group +documentation, or see the CRD yaml +definition.

+

Concepts

+

RouteGroup

+

A RouteGroup represents a grouped routing specification, with one or more backends, typically a Kubernetes +service. The Skipper routes yielded by a route group are handled atomically, meaning that if any problem is +detected during processing a route group, none of the generated routes from that group will be applied.

+

Hosts

+

A list of allowed DNS host names that an incoming HTTP request should match in order to be handled by the route +group. Host list is mandatory.

+

Backend

+

Typically a Kubernetes service, but not necessarily. The routes generated from route groups need to have a +backend, therefore at least one backend is mandatory.

+

Default backend

+

A route group can contain multiple routes. If the routes don’t identify the backend, then the default backends +are used. There can be multiple default backends, e.g. to support weighted A/B testing.

+

Route

+

Routes describe how a matching HTTP request is handled and where it is forwarded to.

+

Predicate

+

A predicate is used during route lookup to identify which route should handle an incoming request. Route group +routes provide dedicated fields for the most common predicates like the path or the HTTP method, but in the +predicates list field, it is possible to define and configure any predicate supported by Skipper. See the +Predicates section of the reference.

+

Filter

+

A filter is used during handling the request to shape the request flow. In a route group, any filter supported +by Skipper is allowed to be used. See the Filters +section of the reference.

+

RouteGroup - top level object

+

The route group spec must contain hosts, backends, routes and optional default backends.

+
apiVersion: zalando.org/v1
+kind: RouteGroup
+spec:
+  hosts:
+  - <string>
+  backends:
+  - <backend>
+  defaultBackends:
+  - <backendRef>
+  routes:
+  - <route>
+
+

Backend

+

The <backend> object defines the type of a backend and the required configuration based on the type. Required +fields are the name and the type, while the rest of the fields may be required based on the type.

+
<backend>
+  name: <string>
+  type: <string>            one of "service|shunt|loopback|dynamic|lb|network"
+  address: <string>         optional, required for type=network
+  algorithm: <string>       optional, valid for type=lb|service, values=roundRobin|random|consistentHash|powerOfRandomNChoices
+  endpoints: <stringarray>  optional, required for type=lb
+  serviceName: <string>     optional, required for type=service
+  servicePort: <number>     optional, required for type=service
+
+

See more about Skipper backends in the backend documentation.

+

Backend reference

+

The <backendRef> object references a backend that is defined in the route group’s backends field. The name is +a required field, while the weight is optional. If no weight is used at all, then the traffic is split evenly +between the referenced backends. One or more backend reference may appear on the route group level as a default +backend, or in a route.

+
<backendRef>
+- backendName: <string>
+  weight: <number>          optional
+
+

Route

+

The <route> object defines the actual routing setup with custom matching rules (predicates), and request flow +shaping with filters.

+
<route>
+  path: <string>            either path or pathSubtree is allowed
+  pathSubtree: <string>     either path or pathSubtree is allowed
+  pathRegexp: <string>      optional
+  methods: <stringarray>    optional, one of the HTTP methods per entry "GET|HEAD|PATCH|POST|PUT|DELETE|CONNECT|OPTIONS|TRACE", defaults to all
+  predicates: <stringarray> optional
+  filters: <stringarray>    optional
+  backends:                 optional, overrides defaults
+  - <backendRef>
+
+

The path, pathSubtree and pathRegexp fields work the same way as the predicate counterparts on eskip +routes. See the reference manual for more details.

+

The methods field defines which methods an incoming request can have in order to match the route.

+

The items in the predicates and filter fields take lists of predicates and filters, respectively, defined in +their eskip format. Example:

+
  predicates:
+  - Cookie("alpha", "enabled")
+  - Header("X-Test", "true")
+  filters:
+  - setQuery("test", "alpha")
+  - compress()
+
+

See also:

+ +

The references in the backends field, if present, define which backends a route should use.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/kubernetes/routegroup-validation/index.html b/kubernetes/routegroup-validation/index.html new file mode 100644 index 0000000000..cad6fba5e1 --- /dev/null +++ b/kubernetes/routegroup-validation/index.html @@ -0,0 +1,1500 @@ + + + + + + + + + + + + + + + + + + + + + + + + + RouteGroup Validation - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

RouteGroup Operations

+

RouteGroup is a Custom Resource +Definition +(CRD).

+

RouteGroup Validation

+

CRDs can be validated at create and update time. The +validation can be done via JSON Schemas, which enables input type +validation and string validation with regular expressions. +In addition to JSON Schema you can use a custom validation webhook.

+

For RouteGroup we provide a CRD yaml with JSON +schema +and a validation webhook as separate binary webhook in the same +docker container as skipper.

+

Synopsis

+
% docker run registry.opensource.zalan.do/teapot/skipper:latest webhook --help
+usage: webhook [<flags>]
+
+Flags:
+  --help                         Show context-sensitive help (also try --help-long and --help-man).
+  --debug                        Enable debug logging
+  --tls-cert-file=TLS-CERT-FILE  File containing the certificate for HTTPS
+  --tls-key-file=TLS-KEY-FILE    File containing the private key for HTTPS
+  --address=":9443"              The address to listen on
+
+

Validation Webhook Installation

+

A Kubernetes validation +webhook +can be installed next to the kubernetes API server. In order to do +this you need:

+
    +
  1. A container running the webhook
  2. +
  3. A ValidatingWebhookConfiguration configuration
  4. +
+

Kubernetes container spec for the RouteGroup validation webhook can +be installed in your kube-apiserver Pod, such that it can communicate +via localhost.

+

We use the TLS based ValidatingWebhookConfiguration +configuration, +that we show below, but you can also scroll down to the Configuration +without TLS. The configuration will make sure the validation +webhook is called on all create and update +operations to zalando.org/v1/routegroups by the Kubernetes API server.

+

Configuration with TLS

+

Here you can see the Pod spec with enabled TLS:

+
- name: routegroups-admission-webhook
+  image: registry.opensource.zalan.do/teapot/skipper:v0.13.3
+  args:
+    - webhook
+    - --address=:9085
+    - --tls-cert-file=/etc/kubernetes/ssl/admission-controller.pem
+    - --tls-key-file=/etc/kubernetes/ssl/admission-controller-key.pem
+  lifecycle:
+    preStop:
+      exec:
+        command: ["/bin/sh", "-c",  " sleep 60"]
+  readinessProbe:
+    httpGet:
+      scheme: HTTPS
+      path: /healthz
+      port: 9085
+    initialDelaySeconds: 5
+    timeoutSeconds: 5
+  resources:
+    requests:
+      cpu: 50m
+      memory: 100Mi
+  ports:
+    - containerPort: 9085
+  volumeMounts:
+    - mountPath: /etc/kubernetes/ssl
+      name: ssl-certs-kubernetes
+      readOnly: true
+
+

Make sure you pass the caBundle and set the url depending where your webhook container is running. +

apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+  name: "routegroup-admitter.teapot.zalan.do"
+  labels:
+    application: routegroups-admission-webhook
+webhooks:
+  - name: "routegroup-admitter.teapot.zalan.do"
+    rules:
+      - operations: ["CREATE", "UPDATE"]
+        apiGroups: ["zalando.org"]
+        apiVersions: ["v1"]
+        resources: ["routegroups"]
+    clientConfig:
+      url: "https://localhost:9085/routegroups"
+      caBundle: |
+        ...8<....
+    admissionReviewVersions: ["v1"]
+    sideEffects: None
+    timeoutSeconds: 5
+

+

Configuration without TLS

+

In case you don’t need TLS, you do not need some of the configuration +shown above.

+

Container spec without TLS:

+
- name: routegroups-admission-webhook
+  image: registry.opensource.zalan.do/teapot/skipper:v0.13.3
+  args:
+    - webhook
+    - --address=:9085
+  lifecycle:
+    preStop:
+      exec:
+        command: ["/bin/sh", "-c",  " sleep 60"]
+  readinessProbe:
+    httpGet:
+      path: /healthz
+      port: 9085
+    initialDelaySeconds: 5
+    timeoutSeconds: 5
+  resources:
+    requests:
+      cpu: 50m
+      memory: 100Mi
+  ports:
+    - containerPort: 9085
+
+

Validation webhook configuration without TLS:

+
apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+  name: "routegroup-admitter.teapot.zalan.do"
+  labels:
+    application: routegroups-admission-webhook
+webhooks:
+  - name: "routegroup-admitter.teapot.zalan.do"
+    rules:
+      - operations: ["CREATE", "UPDATE"]
+        apiGroups: ["zalando.org"]
+        apiVersions: ["v1"]
+        resources: ["routegroups"]
+    clientConfig:
+      url: "http://localhost:9085/routegroups"
+    admissionReviewVersions: ["v1"]
+    sideEffects: None
+    timeoutSeconds: 5
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/kubernetes/routegroups/index.html b/kubernetes/routegroups/index.html new file mode 100644 index 0000000000..6b90c1ea9c --- /dev/null +++ b/kubernetes/routegroups/index.html @@ -0,0 +1,2379 @@ + + + + + + + + + + + + + + + + + + + + + + + + + RouteGroups - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Route groups

+

Route groups are an alternative to the Kubernetes Ingress format for defining ingress rules. They allow to +define Skipper routing in Kubernetes, while providing a straightforward way to configure the routing features +supported by Skipper and not defined by the generic Ingress.

+

Skipper as Kubernetes Ingress controller

+

Skipper is an extensible HTTP router with rich route matching, and request flow and traffic shaping +capabilities. Through its integration with Kubernetes, it can be used in the role of an ingress controller for +forwarding incoming external requests to the right services in a cluster. Kubernetes provides the Ingress +specification to define the rules by which an ingress controller should handle the incoming traffic. The +specification is simple and generic, but doesn’t offer a straightforward way to benefit from Skipper’s rich HTTP +related functionality.

+

RouteGroups

+

A RouteGroup is a custom Kubernetes resource definition. It provides a way to define the ingress routing for +Kubernetes services. It allows route matching based on any HTTP request attributes, and provides a clean way for +the request flow augmentation and traffic shaping. It supports higher level features like gradual traffic +switching, A/B testing, and more.

+

Example:

+
apiVersion: zalando.org/v1
+kind: RouteGroup
+metadata:
+  name: my-routes
+spec:
+  backends:
+  - name: variant-a
+    type: service
+    serviceName: service-a
+    servicePort: 80
+  - name: variant-b
+    type: service
+    serviceName: service-b
+    servicePort: 80
+  defaultBackends:
+  - backendName: variant-b
+  routes:
+  - pathSubtree: /
+    filters:
+    - responseCookie("canary", "A")
+    predicates:
+    - Traffic(.1)
+    backends:
+    - backendName: variant-a
+  - pathSubtree: /
+    filters:
+    - responseCookie("canary", "B")
+  - pathSubtree: /
+    predicates:
+    - Cookie("canary", "A")
+    backends:
+    - backendName: variant-a
+  - pathSubtree: /
+    predicates:
+    - Cookie("canary", "B")
+
+

(See a more detailed explanation of the above example further down in this +document.)

+

Links:

+ +

Requirements

+ +

Installation

+

The definition file of the CRD can be found as part of Skipper’s source code, at:

+

https://github.com/zalando/skipper/blob/master/dataclients/kubernetes/deploy/apply/routegroups_crd.yaml

+

To install it manually in a cluster, assuming the current directory is the root of Skipper’s source, call this +command:

+
kubectl apply -f dataclients/kubernetes/deploy/apply/routegroups_crd.yaml
+
+

This will install a namespaced resource definition, providing the RouteGroup kind:

+
    +
  • full name: routegroups.zalando.org
  • +
  • resource group: zalando.org/v1
  • +
  • resource names: routegroup, routegroups, rg, rgs
  • +
  • kind: RouteGroup
  • +
+

Once any route groups are defined, they can then be displayed via kubectl as:

+
kubectl get rgs
+
+

The API URL of the routegroup resources will be:

+

https://kubernetes-api-hostname/apis/zalando.org/v1/routegroups

+

Usage

+

The absolute minimal route group configuration for a Kubernetes service (my-service) looks as follows:

+
apiVersion: zalando.org/v1
+kind: RouteGroup
+metadata:
+  name: my-route-group
+spec:
+  backends:
+  - name: my-backend
+    type: service
+    serviceName: my-service
+    servicePort: 80
+  routes:
+    - pathSubtree: /
+      backends:
+        - backendName: my-backend
+
+

This is equivalent to the ingress:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: my-ingress
+spec:
+  defaultBackend:
+    service:
+      name: my-service
+      port:
+        number: 80
+
+

Notice that the route group contains a list of actual backends, and the defined service backend is then +referenced as the default backend. This structure plays a role in supporting scenarios like A/B testing and +gradual traffic switching, explained below. The backend definition also has a type +field, whose values can be service, lb, network, shunt, loopback or dynamic. More details on that +below.

+

Creating, updating and deleting route groups happens the same way as with ingress objects. E.g, manually +applying a route group definition:

+
kubectl apply -f my-route-group.yaml
+
+

Hosts

+ +

Hosts contain hostnames that are used to match the requests handled by a given route group. They are also used +to update the required DNS entries and load balancer configuration if the cluster is set up that way.

+

Note that it is also possible to use any Skipper predicate in the routes of a route group, with the Host +predicate included, but the hostnames defined that way will not serve as input for the DNS configuration.

+

Backends

+ +

RouteGroups support different backends. The most typical backend type is the ‘service’, and it works the same +way as in case of ingress definitions.

+

In a RouteGroup, there can be multiple backends and they are listed on the top level of the route group spec, +and are referenced from the actual routes or as default backends.

+

type=service

+

This backend resolves to a Kubernetes service. It works the same way as in case of Ingress definitions. Skipper +resolves the Services to the available Endpoints belonging to the Service, and generates load balanced routes +using them. (This basically means that under the hood, a service backend becomes an lb backend.)

+

type=lb

+

This backend provides load balancing between multiple network endpoints. Keep in mind that the service type +backend automatically generates load balanced routes for the service endpoints, so this backend type typically +doesn’t need to be used for services.

+

type=network

+

This backend type results in routes that proxy incoming requests to the defined network address, regardless of +the Kubernetes semantics, and allows URLs that point somewhere else, potentially outside of the cluster, too.

+

type=shunt, type=loopback, type=dynamic

+

These backend types allow advanced routing setups. Please check the reference +manual for more details.

+

Default Backends

+ +

A default backend is a reference to one of the defined backends. When a route doesn’t specify which backend(s) +to use, the ones referenced in the default backends will be used.

+

In case there are no individual routes at all in the route group, a default set of routes (one or more) will be +generated and will proxy the incoming traffic to the default backends.

+

The reason, why multiple backends can be referenced as default, is that this makes it easy to execute gradual +traffic switching between different versions, even more than two, of the same application. See +more.

+

Routes

+ +

Routes define where to and how the incoming requests will be proxied. The predicates, including the path, +pathSubtree, pathRegexp and methods fields, and any free-form predicate listed under the predicates field, +control which requests are matched by a route, the filters can apply changes to the forwarded requests and the +returned responses, and the backend refs, if defined, override the default backends, where the requests will be +proxied to. If a route group doesn’t contain any explicit routes, but it contains default backends, a default +set of routes will be generated for the route group.

+

It is important to bear in mind that, among the path fields, the plain ‘path’ means an exact path match, while +‘pathSubtree’ behaves as a path prefix, and so it is more similar to the path in the Ingress specification.

+

See also:

+ +

Gradual traffic switching

+

The weighted backend references allow to split the traffic of a single route and send it to different backends +with the ratio defined by the weights of the backend references. E.g:

+
apiVersion: zalando.org/v1
+kind: RouteGroup
+metadata:
+  name: my-routes
+spec:
+  hosts:
+  - api.example.org
+  backends:
+  - name: api-svc-v1
+    type: service
+    serviceName: api-service-v1
+    servicePort: 80
+  - name: api-svc-v2
+    type: service
+    serviceName: foo-service-v2
+    servicePort: 80
+  routes:
+  - pathSubtree: /api
+    backends:
+    - backendName: api-svc-v1
+      weight: 80
+    - backendName: api-svc-v2
+      weight: 20
+
+

In case of the above example, 80% of the requests is sent to api-service-v1 and the rest is sent to +api-service-v2.

+

Since this type of weighted traffic switching can be used in combination with the Traffic predicate, it is +possible to control the routing of a long running A/B test, while still executing gradual traffic switching +independently to deploy a new version of the variants, maybe to deploy a fix only to one variant. E.g:

+
apiVersion: zalando.org/v1
+kind: RouteGroup
+metadata:
+  name: my-routes
+spec:
+  hosts:
+  - api.example.org
+  backends:
+  - name: variant-a
+    type: service
+    serviceName: service-a
+    servicePort: 80
+  - name: variant-b
+    type: service
+    serviceName: service-b-v1
+    servicePort: 80
+  - name: variant-b-v2
+    type: service
+    serviceName: service-b-v2
+    servicePort: 80
+  defaultBackends:
+  - backendName: variant-b
+    weight: 80
+  - backendName: variant-b-v2
+    weight: 20
+  routes:
+  - filters:
+    - responseCookie("canary", "A")
+    predicates:
+    - Traffic(.1)
+    backends:
+    - backendName: variant-a
+  - filters:
+    - responseCookie("canary", "B")
+  - predicates:
+    - Cookie("canary", "A")
+    backends:
+    - backendName: variant-a
+  - predicates:
+    - Cookie("canary", "B")
+
+

See also:

+ +

Mapping from Ingress to RouteGroups

+

RouteGroups are one-way compatible with Ingress, meaning that every Ingress specification can be expressed in +the RouteGroup format, as well. In the following, we describe the mapping from Ingress fields to RouteGroup +fields.

+

Ingress with default backend

+

Ingress:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: my-ingress
+spec:
+  defaultBackend:
+    service:
+      name: my-service
+      port:
+        number: 80
+
+

RouteGroup:

+
apiVersion: zalando.org/v1
+kind: RouteGroup
+metadata:
+  name: my-route-group
+spec:
+  backends:
+  - name: my-backend
+    type: service
+    serviceName: my-service
+    servicePort: 80
+  defaultBackends:
+  - backendName: my-backend
+
+

Ingress with path rule

+

Ingress:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: my-ingress
+spec:
+  rules:
+  - host: api.example.org
+    http:
+      paths:
+      - path: /api
+        pathType: Prefix
+        backend:
+          service:
+            name: my-service
+            port:
+              number: 80
+
+

RouteGroup:

+
apiVersion: zalando.org/v1
+kind: RouteGroup
+metadata:
+  name: my-route-group
+spec:
+  hosts:
+  - api.example.org
+  backends:
+  - name: my-backend
+    type: service
+    serviceName: my-service
+    servicePort: 80
+  routes:
+  - pathSubtree: /api
+
+

Ingress with multiple hosts

+

Ingress (we need to define two rules):

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: my-ingress
+spec:
+  rules:
+  - host: api.example.org
+    http:
+      paths:
+      - path: /api
+        pathType: Prefix
+        backend:
+          service:
+            name: my-service
+            port:
+              number: 80
+  - host: legacy-name.example.org
+    http:
+      paths:
+      - path: /api
+        pathType: Prefix
+        backend:
+          service:
+            name: my-service
+            port:
+              number: 80
+
+

RouteGroup (we just define an additional host):

+
apiVersion: zalando.org/v1
+kind: RouteGroup
+metadata:
+  name: my-route-group
+spec:
+  hosts:
+  - api.example.org
+  - legacy-name.example.org
+  backends:
+  - name: my-backend
+    type: service
+    serviceName: my-service
+    servicePort: 80
+  routes:
+  - pathSubtree: /api
+
+

Ingress with multiple hosts, and different routing

+

For those cases when using multiple hostnames in the same ingress with different rules, we need to apply a +small workaround for the equivalent route group spec. Ingress:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: my-ingress
+spec:
+  rules:
+  - host: api.example.org
+    http:
+      paths:
+      - path: /api
+        pathType: Prefix
+        backend:
+          service:
+            name: my-service
+            port:
+              number: 80
+  - host: legacy-name.example.org
+    http:
+      paths:
+      - path: /application
+        pathType: Prefix
+        backend:
+          service:
+            name: my-service
+            port:
+              number: 80
+
+

RouteGroup (we need to use additional host predicates):

+
apiVersion: zalando.org/v1
+kind: RouteGroup
+metadata:
+  name: my-route-group
+spec:
+  hosts:
+  - api.example.org
+  - legacy-name.example.org
+  backends:
+  - name: my-backend
+    type: service
+    serviceName: my-service
+    servicePort: 80
+  routes:
+  - pathSubtree: /api
+    predicates:
+    - Host("api.example.org")
+  - pathSubtree: /application
+    predicates:
+    - Host("legacy-name.example.org")
+
+

The RouteGroups allow multiple hostnames for each route group, but by default, their union is used during +routing. If we want to distinguish between them, then we need to use an additional Host predicate in the routes. +Importantly, only the hostnames listed under the hosts field serve as input for the DNS and LB configuration.

+

Mapping Skipper Ingress extensions to RouteGroups

+

Skipper accepts a set of annotations in Ingress objects that give access to certain Skipper features that would +not be possible with the native fields of the Ingress spec, e.g. improved path handling or rate limiting. These +annotations can be expressed now natively in the RouteGroups.

+

zalando.org/backend-weights

+

Backend weights are now part of the backend references, and they can be controlled for multiple backend sets +within the same route group. See Gradual traffic switching.

+

zalando.org/skipper-filter and zalando.org/skipper-predicate

+

Filters and predicates are now part of the route objects, and different set of filters or predicates can be set +for different routes.

+

zalando.org/skipper-routes

+

“Custom routes” in a route group are unnecessary, because every route can be configured with predicates, filters +and backends without limitations. E.g where an ingress annotation’s metadata may look like this:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: my-ingress
+  annotations:
+    zalando.org/skipper-routes: |
+      Method("OPTIONS") -> status(200) -> <shunt>
+spec:
+  backend:
+    service:
+      name: my-service
+      port:
+        number: 80
+
+

the equivalent RouteGroup would look like this:

+
apiVersion: zalando.org/v1
+kind: RouteGroup
+metadata:
+  name: my-route-group
+spec:
+  backends:
+  - name: my-backend
+    type: service
+    serviceName: my-service
+    servicePort: 80
+  - name: options200
+    type: shunt
+  defaultBackends:
+  - backendName: my-backend
+  routes:
+  - pathSubtree: /
+  - pathSubtree: /
+    methods: OPTIONS
+    filters:
+    - status(200)
+    backends:
+    - backendName: options200
+
+

zalando.org/ratelimit

+

The ratelimiting can be defined on the route level among the filters, in the same format as in this annotation.

+

zalando.org/skipper-ingress-redirect and zalando.org/skipper-ingress-redirect-code

+

Skipper ingress provides global HTTPS redirect, but it allows individual ingresses to override the global +settings: enabling/disabling it and changing the default redirect code. With route groups, this override can be +achieved by simply defining an additional route, with the same matching rules, and therefore the override can be +controlled eventually on a route basis. E.g:

+
apiVersion: zalando.org/v1
+kind: RouteGroup
+metadata:
+  name: my-route-group
+spec:
+  backends:
+  - name: my-backend
+    type: service
+    serviceName: my-service
+    servicePort: 80
+  - name: redirectShunt
+    type: shunt
+  defaultBackends:
+  - backendName: my-backend
+  routes:
+  - pathSubtree: /
+  - pathSubtree: /
+    predicates:
+    - Header("X-Forwarded-Proto", "http")
+    filters:
+    - redirectTo(302, "https:")
+    backends:
+    - backendName: redirectShunt
+
+

zalando.org/skipper-loadbalancer

+

Skipper Ingress doesn’t use the ClusterIP of the Service for forwarding the traffic to, but sends it directly to +the Endpoints represented by the Service, and balances the load between them with the round-robin algorithm. The +algorithm choice can be overridden by this annotation. In case of the RouteGroups, the algorithm is simply an +attribute of the backend definition, and it can be set individually for each backend. E.g:

+
  backends:
+  - name: my-backend
+    type: service
+    serviceName: my-service
+    servicePort: 80
+    algorithm: consistentHash
+
+

See also:

+ +

zalando.org/skipper-ingress-path-mode

+

The route objects support the different path lookup modes, by using the path, pathSubtree or the +pathRegexp field. See also the route matching +explained for the internals. The mapping is as follows:

+ + + + + + + + + + + + + + + + + +
Ingress pathType:RouteGroup:
Exact and /foopath: /foo
Prefix and /foopathSubtree: /foo
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Ingress (pathType: ImplementationSpecific):RouteGroup:
kubernetes-ingress and /foopathRegexp: ^/foo
path-regexp and /foopathRegexp: /foo
path-prefix and /foopathSubtree: /foo
kubernetes-ingress and /foo$path: /foo
+

Multiple skipper deployments

+

If you want to split, for example, internal and public traffic, it +might be a good choice to split your RouteGroups. Skipper has +the flag --kubernetes-routegroup-class=<string> to only select RouteGroup +objects that have the annotation zalando.org/routegroup.class set to +<string>. Skipper will only create routes for RouteGroup objects with +its annotation or RouteGroup objects that do not have this annotation. The +default class is skipper, if not set.

+

Example RouteGroup:

+
apiVersion: zalando.org/v1
+kind: RouteGroup
+metadata:
+  name: my-route-group
+  annotations:
+    zalando.org/routegroup.class: internal
+spec:
+  backends:
+  - name: my-backend
+    type: service
+    serviceName: my-service
+    servicePort: 80
+  defaultBackends:
+  - backendName: my-backend
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/operation/deployment/index.html b/operation/deployment/index.html new file mode 100644 index 0000000000..c4e34a8b58 --- /dev/null +++ b/operation/deployment/index.html @@ -0,0 +1,1355 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Deployment - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Deployments and Data-Clients

+

Edge HTTP Routing

+

Edge HTTP routing is the first hit to your production HTTP +loadbalancer. Skipper can serve this well and reliably in production since 2016.

+

On the edge you want to dispatch incoming HTTP requests to your +backends, which could be a microservice architecture.

+

In this deployment mode you might have 100k HTTP routes, which are +used in production and modified by many parties.

+

To support this scenario we have the etcd dataclient.

+

Etcd is a distributed database.

+

TODO: why we use ETCD for this purpose

+

Kubernetes Ingress

+

Kubernetes Ingress is the +component responsible for routing traffic into your +Kubernetes cluster. +As a deployer you can define an ingress object and an ingress controller +will make sure incoming traffic gets routed to its backend service as +defined. Skipper supports this scenario with the +Kubernetes dataclient and is used in +production since the end of 2016.

+

Skipper as ingress controller does not need to have any file +configuration or anything external which configures Skipper. Skipper +automatically finds Ingress objects and configures routes +automatically, without reloading. The only requirement is to target +all traffic you want to serve with Kubernetes to a loadbalancer pool +of Skippers. This is a clear advantage over other ingress controllers +like nginx, haproxy or envoy.

+

Read more about Skipper’s Kubernetes dataclient.

+

Demos / Talks

+

In demos you may want to show arbitrary hello world applications. +You can easily describe html or json output on the command line with +the route-string dataclient.

+

Simple Routes File

+

The most static deployment model, known from apache, nginx or haproxy, +is to write your routes into a file and start your http server. +This is what the Eskip file dataclient is about.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/operation/operation/index.html b/operation/operation/index.html new file mode 100644 index 0000000000..5967722440 --- /dev/null +++ b/operation/operation/index.html @@ -0,0 +1,3432 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Operation - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Operations

+

This is the work-in-progress operations guide, showing information +that is relevant for production use.

+

Skipper is proven to scale with number of routes beyond 300.000 routes +per instance. Skipper is running with peaks to 65.000 http requests +per second using multiple instances.

+

Connection Options

+

Skipper’s connection options are allowing you to set Go’s http.Server +Options on the client side and http.Transport on the backend side.

+

It is recommended to read +this blog post about net http timeouts +in order to better understand the impact of these settings.

+

Backend

+

Backend is the side skipper opens a client connection to.

+

Closing idle connections is required for DNS failover, because Go’s +http.Transport caches +DNS lookups and needs to create new connections for doing so. Skipper +will start a goroutine and use the specified +time.Duration to call +CloseIdleConnections() on that +http.Transport.

+
-close-idle-conns-period string
+    period of closing all idle connections in seconds or as a
+    duration string. Not closing when less than 0 (default "20")
+
+ +

This will set MaxIdleConnsPerHost on the +http.Transport to limit +the number of idle connections per backend such that we do not run out +of sockets.

+
-idle-conns-num int
+    maximum idle connections per backend host (default 64)
+
+ +

This will set MaxIdleConns on the +http.Transport to limit +the number for all backends such that we do not run out of sockets.

+
-disable-http-keepalives bool
+    forces backend to always create a new connection
+
+ +

This will set DisableKeepAlives on the +http.Transport to disable +HTTP keep-alive and to only use the connection for single request.

+
-max-idle-connection-backend int
+    sets the maximum idle connections for all backend connections
+
+ +

This will set TLSHandshakeTimeout on the +http.Transport to have +timeouts based on TLS connections.

+
-tls-timeout-backend duration
+    sets the TLS handshake timeout for backend connections (default 1m0s)
+
+ +

This will set Timeout on +net.Dialer that is the +implementation of DialContext, which is the TCP connection pool used in the +http.Transport.

+
-timeout-backend duration
+    sets the TCP client connection timeout for backend connections (default 1m0s)
+
+ +

This will set KeepAlive on +net.Dialer that is the +implementation of DialContext, which is the TCP connection pool used in the +http.Transport.

+
-keepalive-backend duration
+    sets the keepalive for backend connections (default 30s)
+
+ +

This will set DualStack (IPv4 and IPv6) on +net.Dialer that is the +implementation of DialContext, which is the TCP connection pool used in the +http.Transport.

+
-enable-dualstack-backend
+    enables DualStack for backend connections (default true)
+
+ +

Client

+

Client is the side skipper gets incoming calls from. +Here we can set timeouts in different parts of the http connection.

+

This will set ReadTimeout in +http.Server handling +incoming calls from your clients.

+
-read-timeout-server duration
+    set ReadTimeout for http server connections (default 5m0s)
+
+ +

This will set ReadHeaderTimeout in +http.Server handling +incoming calls from your clients.

+
-read-header-timeout-server duration
+    set ReadHeaderTimeout for http server connections (default 1m0s)
+
+ +

This will set WriteTimeout in +http.Server handling +incoming calls from your clients.

+
-write-timeout-server duration
+    set WriteTimeout for http server connections (default 1m0s)
+
+ +

This will set IdleTimeout in +http.Server handling +incoming calls from your clients. If you have another loadbalancer +layer in front of your Skipper http routers, for example AWS Application Load +Balancers, +you should make sure, that Skipper’s idle-timeout-server setting is +bigger than the idle timeout from the loadbalancer in front. Wrong +combinations of idle timeouts can lead to a few unexpected HTTP 502.

+
-idle-timeout-server duration
+    set IdleTimeout for http server connections (default 1m0s)
+
+ +

This will set MaxHeaderBytes in +http.Server to limit the +size of the http header from your clients.

+
-max-header-bytes int
+    set MaxHeaderBytes for http server connections (default 1048576)
+
+ +

TCP LIFO

+

Skipper now supports limiting the maximum number of incoming TCP client +connections.

+

The purpose of the mechanism is to prevent Skipper requesting more memory +than available in case of too many concurrent connections, especially in +an autoscaling deployment setup, in those case when the scaling is not +fast enough to follow sudden connection spikes.

+

This solution relies on a listener implementation combined with a LIFO +queue. It allows only a limited number of connections being handled +concurrently, defined by the max concurrency configuration. When the max +concurrency limit is reached, the new incoming client connections are +stored in a queue. When an active (accepted) connection is closed, the +most recent pending connection from the queue will be accepted. When the +queue is full, the oldest pending connection is closed and dropped, and the +new one is inserted into the queue.

+

The feature can be enabled with the -enable-tcp-queue flag. The maximum +concurrency can be set with the -max-tcp-listener-concurrency flag, or, +if this flag is not set, then Skipper tries to infer the maximum accepted +concurrency from the system by reading the +/sys/fs/cgroup/memory/memory.limit_in_bytes file. In this case, it uses the +average expected per request memory requirement, which can be set with the +-expected-bytes-per-request flag.

+

Note that the automatically inferred limit may not work as expected in an +environment other than cgroups v1.

+

OAuth2 Tokeninfo

+

OAuth2 filters integrate with external services and have their own +connection handling. Outgoing calls to these services have a +default timeout of 2s, which can be changed by the flag +-oauth2-tokeninfo-timeout=<OAuthTokeninfoTimeout>.

+

OAuth2 Tokenintrospection RFC7662

+

OAuth2 filters integrate with external services and have their own +connection handling. Outgoing calls to these services have a +default timeout of 2s, which can be changed by the flag +-oauth2-tokenintrospect-timeout=<OAuthTokenintrospectionTimeout>.

+

Monitoring

+

Monitoring is one of the most important things you need to run in +production and skipper has a godoc page +for the metrics package, +describing options and most keys you will find in the metrics handler +endpoint. The default is listening on :9911/metrics. You can modify +the listen port with the -support-listener flag. Metrics can be exposed +using formats Codahale (json) or Prometheus and be configured by +-metrics-flavour=, which defaults to codahale. To expose both +formats you can use a comma separated list: -metrics-flavour=codahale,prometheus.

+

Prometheus

+

In case you want to get metrics in Prometheus format exposed, use this +option to enable it:

+
-metrics-flavour=prometheus
+
+ +

It will return Prometheus metrics on the +common metrics endpoint :9911/metrics.

+

To monitor skipper we recommend the following queries:

+
    +
  • P99 backend latency: histogram_quantile(0.99, sum(rate(skipper_serve_host_duration_seconds_bucket{}[1m])) by (le))
  • +
  • HTTP 2xx rate: histogram_quantile(0.99, sum(rate(skipper_serve_host_duration_seconds_bucket{code =~ "2.*"}[1m])) by (le) )
  • +
  • HTTP 4xx rate: histogram_quantile(0.99, sum(rate(skipper_serve_host_duration_seconds_bucket{code =~ "4.*"}[1m])) by (le) )
  • +
  • HTTP 5xx rate: histogram_quantile(0.99, sum(rate(skipper_serve_host_duration_seconds_bucket{code =~ "5.*"}[1m])) by (le) )
  • +
  • Max goroutines (depends on label selector): max(go_goroutines{application="skipper-ingress"})
  • +
  • Max threads (depends on label selector): max(go_threads{application="skipper-ingress"})
  • +
  • max heap memory in use in MB (depends on label selector): max(go_memstats_heap_inuse_bytes{application="skipper-ingress"}) / 1024 / 1000
  • +
  • Max number of heap objects (depends on label selector): max(go_memstats_heap_objects{application="skipper-ingress"})
  • +
  • Max of P75 Go GC runtime in ms (depends on label selector): max(go_gc_duration_seconds{application="skipper-ingress",quantile="0.75"}) * 1000 * 1000
  • +
  • P99 request filter duration (depends on label selector): histogram_quantile(0.99, sum(rate(skipper_filter_request_duration_seconds_bucket{application="skipper-ingress"}[1m])) by (le) )
  • +
  • P99 response filter duration (depends on label selector): histogram_quantile(0.99, sum(rate(skipper_filter_response_duration_seconds_bucket{application="skipper-ingress"}[1m])) by (le) )
  • +
  • If you use Kubernetes limits or Linux cgroup CFS quotas (depends on label selector): sum(rate(container_cpu_cfs_throttled_periods_total{container_name="skipper-ingress"}[1m]))
  • +
+

You may add static metrics labels like version using Prometheus relabeling feature.

+

Connection metrics

+

This option will enable known loadbalancer connections metrics, like +counters for active and new connections. This feature sets a metrics +callback on http.Server and +uses a counter to collect +http.ConnState.

+
-enable-connection-metrics
+    enables connection metrics for http server connections
+
+ +

It will expose them in /metrics, for example json structure looks like this example:

+
{
+  "counters": {
+    "skipper.lb-conn-active": {
+      "count": 6
+    },
+    "skipper.lb-conn-closed": {
+      "count": 6
+    },
+    "skipper.lb-conn-idle": {
+      "count": 6
+    },
+    "skipper.lb-conn-new": {
+      "count": 6
+    }
+  },
+  /* stripped a lot of metrics here */
+}
+
+

LIFO metrics

+

When enabled in the routes, LIFO queues can control the maximum concurrency level +proxied to the backends and mitigate the impact of traffic spikes. The current +level of concurrency and the size of the queue can be monitored with gauges per +each route using one of the lifo filters. To enable monitoring for the lifo +filters, use the command line option:

+
-enable-route-lifo-metrics
+
+ +

When queried, it will return metrics like:

+
{
+  "gauges": {
+    "skipper.lifo.routeXYZ.active": {
+      "value": 245
+    },
+    "skipper.lifo.routeXYZ.queued": {
+      "value": 27
+    }
+  }
+}
+
+

Application metrics

+

You can enable application metrics for your proxied applications with the option:

+
-serve-host-metrics
+    enables reporting total serve time metrics for each host
+-serve-route-metrics
+    enables reporting total serve time metrics for each route
+
+ +

This will make sure you will get stats for each “Host” header or the +route name as “timers”. The following is an example for +-serve-host-metrics:

+
"timers": {
+  "skipper.servehost.app1_example_com.GET.200": {
+    "15m.rate": 0.06830666203045982,
+    "1m.rate": 2.162612637718806e-06,
+    "5m.rate": 0.008312609284452856,
+    "75%": 236603815,
+    "95%": 236603815,
+    "99%": 236603815,
+    "99.9%": 236603815,
+    "count": 3,
+    "max": 236603815,
+    "mean": 116515451.66666667,
+    "mean.rate": 0.0030589345776699827,
+    "median": 91273391,
+    "min": 21669149,
+    "stddev": 89543653.71950394
+  },
+  "skipper.servehost.app1_example_com.GET.304": {
+    "15m.rate": 0.3503336738177459,
+    "1m.rate": 0.07923086447313292,
+    "5m.rate": 0.27019839341602214,
+    "75%": 99351895.25,
+    "95%": 105381847,
+    "99%": 105381847,
+    "99.9%": 105381847,
+    "count": 4,
+    "max": 105381847,
+    "mean": 47621612,
+    "mean.rate": 0.03087161486272533,
+    "median": 41676170.5,
+    "min": 1752260,
+    "stddev": 46489302.203724876
+  },
+  "skipper.servehost.app1_example_com.GET.401": {
+    "15m.rate": 0.16838468990057648,
+    "1m.rate": 0.01572861413072501,
+    "5m.rate": 0.1194724817779537,
+    "75%": 91094832,
+    "95%": 91094832,
+    "99%": 91094832,
+    "99.9%": 91094832,
+    "count": 2,
+    "max": 91094832,
+    "mean": 58090623,
+    "mean.rate": 0.012304914018033056,
+    "median": 58090623,
+    "min": 25086414,
+    "stddev": 33004209
+  }
+},
+
+

Note you can reduce the dimension of the metrics by removing the HTTP +status code and method from it. Use the -serve-method-metric=false +and/or -serve-status-code-metric=false. Both flags are enabled by +default. For prometheus metrics flavour, a counter with both the HTTP +method and status code can be enabled with -serve-host-counter or +-serve-route-counter, even if these flags are disabled.

+

To change the sampling type of how metrics are handled from +uniform +to exponential decay, +you can use the following option, which is better for not so huge +utilized applications (less than 100 requests per second):

+
-metrics-exp-decay-sample
+    use exponentially decaying sample in metrics
+
+ +

Go metrics

+

Metrics from the +go runtime memstats +are exposed from skipper to the metrics endpoint, default listener +:9911, on path /metrics

+

Go metrics - Codahale

+
"gauges": {
+  "skipper.runtime.MemStats.Alloc": {
+    "value": 3083680
+  },
+  "skipper.runtime.MemStats.BuckHashSys": {
+    "value": 1452675
+  },
+  "skipper.runtime.MemStats.DebugGC": {
+    "value": 0
+  },
+  "skipper.runtime.MemStats.EnableGC": {
+    "value": 1
+  },
+  "skipper.runtime.MemStats.Frees": {
+    "value": 121
+  },
+  "skipper.runtime.MemStats.HeapAlloc": {
+    "value": 3083680
+  },
+  "skipper.runtime.MemStats.HeapIdle": {
+    "value": 778240
+  },
+  "skipper.runtime.MemStats.HeapInuse": {
+    "value": 4988928
+  },
+  "skipper.runtime.MemStats.HeapObjects": {
+    "value": 24005
+  },
+  "skipper.runtime.MemStats.HeapReleased": {
+    "value": 0
+  },
+  "skipper.runtime.MemStats.HeapSys": {
+    "value": 5767168
+  },
+  "skipper.runtime.MemStats.LastGC": {
+    "value": 1516098381155094500
+  },
+  "skipper.runtime.MemStats.Lookups": {
+    "value": 2
+  },
+  "skipper.runtime.MemStats.MCacheInuse": {
+    "value": 6944
+  },
+  "skipper.runtime.MemStats.MCacheSys": {
+    "value": 16384
+  },
+  "skipper.runtime.MemStats.MSpanInuse": {
+    "value": 77368
+  },
+  "skipper.runtime.MemStats.MSpanSys": {
+    "value": 81920
+  },
+  "skipper.runtime.MemStats.Mallocs": {
+    "value": 1459
+  },
+  "skipper.runtime.MemStats.NextGC": {
+    "value": 4194304
+  },
+  "skipper.runtime.MemStats.NumGC": {
+    "value": 0
+  },
+  "skipper.runtime.MemStats.PauseTotalNs": {
+    "value": 683352
+  },
+  "skipper.runtime.MemStats.StackInuse": {
+    "value": 524288
+  },
+  "skipper.runtime.MemStats.StackSys": {
+    "value": 524288
+  },
+  "skipper.runtime.MemStats.Sys": {
+    "value": 9246968
+  },
+  "skipper.runtime.MemStats.TotalAlloc": {
+    "value": 35127624
+  },
+  "skipper.runtime.NumCgoCall": {
+    "value": 0
+  },
+  "skipper.runtime.NumGoroutine": {
+    "value": 11
+  },
+  "skipper.runtime.NumThread": {
+    "value": 9
+  }
+},
+"histograms": {
+  "skipper.runtime.MemStats.PauseNs": {
+    "75%": 82509.25,
+    "95%": 132609,
+    "99%": 132609,
+    "99.9%": 132609,
+    "count": 12,
+    "max": 132609,
+    "mean": 56946,
+    "median": 39302.5,
+    "min": 28749,
+    "stddev": 31567.015005117817
+  }
+}
+
+

Go metrics - Prometheus

+
# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 4.7279e-05
+go_gc_duration_seconds{quantile="0.25"} 5.9291e-05
+go_gc_duration_seconds{quantile="0.5"} 7.4e-05
+go_gc_duration_seconds{quantile="0.75"} 9.55e-05
+go_gc_duration_seconds{quantile="1"} 0.000199667
+go_gc_duration_seconds_sum 0.001108339
+go_gc_duration_seconds_count 13
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 13
+# HELP go_info Information about the Go environment.
+# TYPE go_info gauge
+go_info{version="go1.21.3"} 1
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 6.4856e+06
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 4.1797384e+07
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 1.462151e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 507460
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 4.549296e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 6.4856e+06
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 7.421952e+06
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 8.372224e+06
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 70159
+# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes gauge
+go_memstats_heap_released_bytes 6.47168e+06
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 1.5794176e+07
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.6987664839728708e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 577619
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 19200
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 31200
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 302904
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 309624
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 8.206808e+06
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 2.402169e+06
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 983040
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 983040
+# HELP go_memstats_sys_bytes Number of bytes obtained from system.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 2.5531656e+07
+# HELP go_threads Number of OS threads created.
+# TYPE go_threads gauge
+go_threads 22
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 0.42
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 60000
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 10
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 4.2811392e+07
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.69876646736e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 2.823462912e+09
+# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
+# TYPE process_virtual_memory_max_bytes gauge
+process_virtual_memory_max_bytes 1.8446744073709552e+19
+
+

Redis - Rate limiting metrics

+

System metrics exposed by the redisclient:

+
    +
  • skipper.swarm.redis.shards: known Redis shards to the skipper ringclient
  • +
  • skipper.swarm.redis.hits: number of times free connection was found in the pool
  • +
  • skipper.swarm.redis.misses: number of times free connection was NOT found in the pool
  • +
  • skipper.swarm.redis.timeouts: number of times a wait timeout occurred
  • +
  • skipper.swarm.redis.staleconns: number of stale connections removed from the pool
  • +
  • skipper.swarm.redis.idleconns: number of idle connections in the pool
  • +
  • skipper.swarm.redis.totalconns: number of total connections in the pool
  • +
+

Timer metrics for the latencies and errors of the communication with the auxiliary Redis instances are enabled +by default, and exposed among the timers via the following keys:

+
    +
  • skipper.swarm.redis.query.allow.success: successful allow requests to the rate limiter, ungrouped
  • +
  • skipper.swarm.redis.query.allow.failure: failed allow requests to the rate limiter, ungrouped, where the redis + communication failed
  • +
  • skipper.swarm.redis.query.retryafter.success.: successful allow requests to the rate limiter, grouped + by the rate limiter group name when used
  • +
  • skipper.swarm.redis.query.retryafter.failure.: failed allow requests to the rate limiter, ungrouped, + where the redis communication failed, grouped by the rate limiter group name when used
  • +
+

See more details about rate limiting at Rate limiting.

+

Open Policy Agent metrics

+

If Open Policy Agent filters are enabled, the following counters show up in the /metrics endpoint. The bundle-name is the first parameter of the filter so that for example increased error codes can be attributed to a specific source bundle / system.

+
    +
  • skipper.opaAuthorizeRequest.custom.decision.allow.<bundle-name>
  • +
  • skipper.opaAuthorizeRequest.custom.decision.deny.<bundle-name>
  • +
  • skipper.opaAuthorizeRequest.custom.decision.err.<bundle-name>
  • +
  • skipper.opaServeResponse.custom.decision.allow.<bundle-name>
  • +
  • skipper.opaServeResponse.custom.decision.deny.<bundle-name>
  • +
  • skipper.opaServeResponse.custom.decision.err.<bundle-name>
  • +
+

The following timer metrics are exposed per used bundle-name:

+
    +
  • skipper.opaAuthorizeRequest.custom.eval_time.<bundle-name>
  • +
  • skipper.opaServeResponse.custom.eval_time.<bundle-name>
  • +
+

RouteSRV metrics

+

RouteSRV metrics expose the following metrics in Prometheus format:

+
% curl http://127.0.0.1:9911/metrics
+# 8< Go metrics >8
+
+# HELP routesrv_backend_combined_duration_seconds Duration in seconds of a proxy backend combined.
+# TYPE routesrv_backend_combined_duration_seconds histogram
+routesrv_backend_combined_duration_seconds_bucket{le="0.005"} 5
+routesrv_backend_combined_duration_seconds_bucket{le="0.01"} 5
+routesrv_backend_combined_duration_seconds_bucket{le="0.025"} 5
+routesrv_backend_combined_duration_seconds_bucket{le="0.05"} 5
+routesrv_backend_combined_duration_seconds_bucket{le="0.1"} 5
+routesrv_backend_combined_duration_seconds_bucket{le="0.25"} 5
+routesrv_backend_combined_duration_seconds_bucket{le="0.5"} 5
+routesrv_backend_combined_duration_seconds_bucket{le="1"} 5
+routesrv_backend_combined_duration_seconds_bucket{le="2.5"} 5
+routesrv_backend_combined_duration_seconds_bucket{le="5"} 5
+routesrv_backend_combined_duration_seconds_bucket{le="10"} 5
+routesrv_backend_combined_duration_seconds_bucket{le="+Inf"} 5
+routesrv_backend_combined_duration_seconds_sum 0.001349441
+routesrv_backend_combined_duration_seconds_count 5
+# HELP routesrv_backend_duration_seconds Duration in seconds of a proxy backend.
+# TYPE routesrv_backend_duration_seconds histogram
+routesrv_backend_duration_seconds_bucket{host="",route="routersv",le="0.005"} 5
+routesrv_backend_duration_seconds_bucket{host="",route="routersv",le="0.01"} 5
+routesrv_backend_duration_seconds_bucket{host="",route="routersv",le="0.025"} 5
+routesrv_backend_duration_seconds_bucket{host="",route="routersv",le="0.05"} 5
+routesrv_backend_duration_seconds_bucket{host="",route="routersv",le="0.1"} 5
+routesrv_backend_duration_seconds_bucket{host="",route="routersv",le="0.25"} 5
+routesrv_backend_duration_seconds_bucket{host="",route="routersv",le="0.5"} 5
+routesrv_backend_duration_seconds_bucket{host="",route="routersv",le="1"} 5
+routesrv_backend_duration_seconds_bucket{host="",route="routersv",le="2.5"} 5
+routesrv_backend_duration_seconds_bucket{host="",route="routersv",le="5"} 5
+routesrv_backend_duration_seconds_bucket{host="",route="routersv",le="10"} 5
+routesrv_backend_duration_seconds_bucket{host="",route="routersv",le="+Inf"} 5
+routesrv_backend_duration_seconds_sum{host="",route="routersv"} 0.001349441
+routesrv_backend_duration_seconds_count{host="",route="routersv"} 5
+# HELP routesrv_custom_gauges Gauges number of custom metrics.
+# TYPE routesrv_custom_gauges gauge
+routesrv_custom_gauges{key="polling_started_timestamp"} 1.69876646881321e+09
+routesrv_custom_gauges{key="redis_endpoints"} 1
+routesrv_custom_gauges{key="routes.byte"} 91378
+routesrv_custom_gauges{key="routes.initialized_timestamp"} 1.6987664689696188e+09
+routesrv_custom_gauges{key="routes.total"} 258
+routesrv_custom_gauges{key="routes.updated_timestamp"} 1.698766468969631e+09
+# HELP routesrv_custom_total Total number of custom metrics.
+# TYPE routesrv_custom_total counter
+routesrv_custom_total{key="200"} 5
+
+

Metrics explanation:

+
    +
  • routesrv_custom_total{key="200"} 5: + 5 requests were responded with status code 200 by the current routesrv + version v0.18.38.
  • +
  • routesrv_custom_gauges{key="polling_started_timestamp"} 1.69876646881321e+09: + routesrv started to poll at 1.69876646881321e+09 seconds of UNIX beginning + (2023-10-31 16:34:28 1705425/2097152 +0100).
  • +
  • routesrv_custom_gauges{key="redis_endpoints"} 1: + The routes endpoint /swarm/redis/shards was called 1 time
  • +
  • routesrv_custom_gauges{key="routes.byte"} 91378: + The number of bytes that are served at /routes is 91378.
  • +
  • routesrv_custom_gauges{key="routes.initialized_timestamp"} 1.6987664689696188e+09: + routesrv initialized the routes at 1.6987664689696188e+09 seconds of UNIX beginning. + (2023-10-31 16:34:28 1016719/1048576 +0100)
  • +
  • routesrv_custom_gauges{key="routes.total"} 258: + The number of routes that are served at /routes are 258.
  • +
  • routesrv_custom_gauges{key="routes.updated_timestamp"} 1.698766468969631e+09: + The last update of routes by routesrv was at 1.698766468969631e+09. + (2023-10-31 16:34:28 4066927/4194304 +0100)
  • +
+

If you want to read more about RouteSRV see deploy RouteSRV.

+

OpenTracing

+

Skipper has support for different OpenTracing API vendors, including +jaeger, +lightstep and +instana.

+

You can configure tracing implementations with a flag and pass +information and tags to the tracer:

+
-opentracing=<vendor> component-name=skipper-ingress ... tag=cluster=mycluster ...
+
+

The best tested tracer is the lightstep tracer, +because we use it in our setup. In case you miss something for your chosen tracer, please +open an issue or pull request in our repository.

+

Skipper creates up to 5 different +spans: +Spans

+

Some Tag details are added to all spans. +Span details

+

Ingress span

+

The Ingress span is active from getting the request in Skipper’s main +http handler, until we served the response to the client of the request.

+

Tags:

+
    +
  • component: skipper
  • +
  • hostname: ip-10-149-64-142
  • +
  • http.host: hostname.example.org
  • +
  • http.method: GET
  • +
  • http.path: /
  • +
  • http.remote_addr: 10.149.66.207:14574
  • +
  • http.url: /
  • +
  • span.kind: server
  • +
+

Ingress span with tags

+

Proxy span

+

The Proxy span starts just before executing the backend call.

+

Tags:

+
    +
  • component: skipper
  • +
  • hostname: ip-10-149-65-70
  • +
  • http.host: hostname.example.org
  • +
  • http.method: GET
  • +
  • http.path: /
  • +
  • http.remote_addr:
  • +
  • http.status_code: 200
  • +
  • http.url: http://10.2.0.11:9090/
  • +
  • skipper.route_id: kube_default__example_ingress_hostname_example_org____example_backend
  • +
  • span.kind: client
  • +
+

Proxy span with tags

+

Proxy span has logs to measure +connect (dial_context), +http roundtrip +(http_roundtrip), stream headers from backend to client +(stream_Headers), stream body from backend to client +(streamBody.byte) and events by the Go runtime.

+

Proxy span with logs

+

In addition to the manual instrumented proxy client logs, we use +net/http/httptrace.ClientTrace to show events by the Go +runtime. Full logs of the Proxy span:

+ +

Proxy span logs

+

Request filters span

+

The request filters span logs show start and end events for each filter applied.

+

request filter span with logs

+

Response filters span

+

The response filters span logs show start and end events for each filter applied.

+

response filter span with logs

+

Request and response filters event logging can be disabled by setting the -opentracing-log-filter-lifecycle-events=false flag and +span creation can be disabled altogether by the -opentracing-disable-filter-spans flag.

+

Auth filters span

+

Auth filters are special, because they might call an authorization +endpoint, which should also be visible in the trace. This span can +have the name “tokeninfo”, “tokenintrospection” or “webhook” depending +on the filter used by the matched route.

+

Tags: +- http.url: https://auth.example.org

+

The auth filters have trace log values start and end for DNS, TCP +connect, TLS handshake and connection pool:

+

tokeninfo auth filter span with logs

+

Open Policy Agent span

+

When one of the Open Policy Agent filters is used, child spans with the operation name open-policy-agent are added to the Trace.

+

The following tags are added to the Span, labels are taken from the OPA configuration YAML file as is and are not interpreted: +- opa.decision_id=<decision id that was recorded> +- opa.labels.<label1>=<value1>

+

The labels can for example be used to link to a specific decision in the control plane if they contain URL fragments for the receiving entity.

+

Redis rate limiting spans

+

Operation: redis_allow_check_card

+

Operation executed when the cluster rate limiting relies on the auxiliary Redis instances, and the Allow method +checks if the rate exceeds the configured limit.

+

Operation: redis_allow_add_card

+

Operation setting the counter of the measured request rate for cluster rate limiting with auxiliary Redis +instances.

+

Operation: redis_oldest_score

+

Operation querying the oldest request event for the rate limiting Retry-After header with cluster rate limiting +when used with auxiliary Redis instances.

+

Dataclient

+

Dataclients poll some kind of data source for routes. To change the +timeout for calls that polls a dataclient, which could be the +Kubernetes API, use the following option:

+
-source-poll-timeout int
+    polling timeout of the routing data sources, in milliseconds (default 3000)
+
+ +

Routing table information

+

Skipper allows you to get some runtime insights. You can get the +current routing table from skipper in the +eskip file format:

+
curl localhost:9911/routes
+*
+-> "http://localhost:12345/"
+
+

You also can get the number of routes X-Count and the UNIX timestamp +of the last route table update X-Timestamp, using a HEAD request:

+
curl -I localhost:9911/routes
+HTTP/1.1 200 OK
+Content-Type: text/plain
+X-Count: 1
+X-Timestamp: 1517777628
+Date: Sun, 04 Feb 2018 20:54:31 GMT
+
+

The number of routes given is limited (1024 routes by default). +In order to control these limits, there are two parameters: limit and +offset. The limit defines the number of routes to get and +offset where to start the list. Thanks to this, it’s possible +to get the results paginated or getting all of them at the same time.

+
curl localhost:9911/routes?offset=200&limit=100
+
+

Passive health check (experimental)

+

Skipper has an option to automatically detect and mitigate faulty backend endpoints, this feature is called +Passive Health Check (PHC).

+

PHC works the following way: the entire uptime is divided into chunks of period, per every period Skipper calculates +the total amount of requests and amount of requests failed per every endpoint. While the next period is going on, +Skipper takes a look at the previous period and if the amount of requests in the previous period is more than min-requests +and the failed requests ratio is more than min-drop-probability for the given endpoints +then Skipper will send a reduced (the higher max-drop-probability and the failed requests ratio +in the previous period are, the stronger the reduction is) amount of requests compared to the amount sent without PHC. +If the ratio of unhealthy endpoints is more than max-unhealthy-endpoints-ratio then PHC becomes fail-open. This effectively means +if there are too many unhealthy endpoints PHC does not try to mitigate them any more and requests are sent like there is no PHC at all.

+

Having this, we expect less requests to fail because a lot of them would be sent to endpoints that seem to be healthy instead.

+

To enable this feature, you need to provide the -passive-health-check option having the aforementioned parameters +(period, min-requests, min-drop-probability, max-drop-probability, max-unhealthy-endpoints-ratio) defined. +period, min-requests, max-drop-probability are required parameters, it is not possible for PHC to be enabled without +them explicitly defined by user. min-drop-probability is implicitly defined as 0.0 if not explicitly set by user. +max-unhealthy-endpoints-ratio is defined as 1.0 if not explicitly set by user. +Valid examples of -passive-health-check are:

+
    +
  • -passive-health-check=period=1s,min-requests=10,min-drop-probability=0.05,max-drop-probability=0.9,max-unhealthy-endpoints-ratio=0.3
  • +
  • -passive-health-check=period=1s,min-requests=10,max-drop-probability=0.9,max-unhealthy-endpoints-ratio=0.3
  • +
  • -passive-health-check=period=1s,min-requests=10,min-drop-probability=0.05,max-drop-probability=0.9
  • +
  • -passive-health-check=period=1s,min-requests=10,max-drop-probability=0.9
  • +
+

If -passive-health-check option is provided, but some required parameters are not defined, Skipper will not start. +Skipper will run without this feature, if no -passive-health-check is provided at all.

+

The parameters of -passive-health-check option are:

+
    +
  • period=<duration> - the duration of stats reset period
  • +
  • min-requests=<int> - the minimum number of requests per period per backend endpoint required to activate PHC for this endpoint
  • +
  • min-drop-probability=[0.0 <= p < max-drop-probability) - the minimum possible probability of an unhealthy endpoint not being considered while choosing the endpoint for the given request. The same value is in fact used as the minimal failed requests ratio for PHC to be enabled for this endpoint
  • +
  • max-drop-probability=(min-drop-probability < p <= 1.0] - the maximum possible probability of an unhealthy endpoint not being considered +while choosing the endpoint for the given request
  • +
  • max-unhealthy-endpoints-ratio=[0.0 <= r <= 1.0] - the maximum ratio of unhealthy endpoints for PHC to try to mitigate ongoing requests
  • +
+

Metrics

+

A set of metrics will be exposed to track passive health check:

+
    +
  • passive-health-check.endpoints.dropped: Number of all endpoints dropped before load balancing a request, so after N requests and M endpoints are being dropped this counter would be N*M.
  • +
  • passive-health-check.requests.passed: Number of unique requests where PHC was able to avoid sending them to unhealthy endpoints.
  • +
+

Memory consumption

+

While Skipper is generally not memory bound, some features may require +some attention and planning regarding the memory consumption.

+

Potentially high memory consumers:

+
    +
  • Metrics
  • +
  • Filters
  • +
  • Slow Backends and chatty clients
  • +
+

Make sure you monitor backend latency, request and error rates. +Additionally use Go metrics for the number of goroutines and threads, GC pause +times should be less than 1ms in general, route lookup time, request +and response filter times and heap memory.

+

Metrics

+

Memory consumption of metrics are dependent on enabled command line +flags. Make sure to monitor Go metrics.

+

If you use -metrics-flavour=codahale,prometheus you enable both +storage backends.

+

If you use the Prometheus histogram buckets -histogram-metric-buckets.

+

If you enable route based -route-backend-metrics +-route-response-metrics -serve-route-metrics, error codes +-route-response-metrics and host -serve-host-metrics based metrics +it can count up. Please check the support listener endpoint (default +9911) to understand the usage:

+
% curl localhost:9911/metrics
+
+

By default, the route and host metrics include the labels for the request +HTTP response status code and the HTTP method. You can customize it by +setting -serve-method-metric=false and/or +-serve-status-code-metric=false. These two flags will enable or +disable the method and status code labels from your metrics reducing the +number of metrics generated and memory consumption.

+

Filters

+

Ratelimit filter clusterClientRatelimit implementation using the +swim based protocol, consumes roughly 15MB per filter for 100.000 +individual clients and 10 maximum hits. Make sure you monitor Go +metrics. Ratelimit filter clusterClientRatelimit implementation +using the Redis ring based solution, adds 2 additional roundtrips to +redis per hit. Make sure you monitor redis closely, because skipper +will fallback to allow traffic if redis can not be reached.

+

Slow Backends

+

Skipper has to keep track of all active connections and http +Requests. Slow Backends can pile up in number of connections, that +will consume each a little memory per request. If you have high +traffic per instance and a backend times out it can start to increase +your memory consumption. Make sure you monitor backend latency, +request and error rates.

+

Default Filters

+

Default filters will be applied to all routes created or updated.

+

Global Default Filters

+

Global default filters can be specified via two different command line +flags -default-filters-prepend and +-default-filters-append. Filters passed to these command line flags +will be applied to all routes. The difference prepend and append is +where in the filter chain these default filters are applied.

+

For example a user specified the route: r: * -> setPath("/foo") +If you run skipper with -default-filters-prepend=enableAccessLog(4,5) -> lifo(100,100,"10s"), +the actual route will look like this: r: * -> enableAccessLog(4,5) -> lifo(100,100,"10s") -> setPath("/foo"). +If you run skipper with -default-filters-append=enableAccessLog(4,5) -> lifo(100,100,"10s"), +the actual route will look like this: r: * -> setPath("/foo") -> enableAccessLog(4,5) -> lifo(100,100,"10s").

+

Kubernetes Default Filters

+

Kubernetes dataclient supports default filters. You can enable this feature by +specifying default-filters-dir. The defined directory must contain per-service +filter configurations, with file name following the pattern ${service}.${namespace}. +The content of the files is the actual filter configurations. These filters are then +prepended to the filters already defined in Ingresses.

+

The default filters are supposed to be used only if the filters of the same kind +are not configured on the Ingress resource. Otherwise, it can and will lead to +potentially contradicting filter configurations and race conditions, i.e. +you should specify a specific filter either on the Ingress resource or as +a default filter.

+

Scheduler

+

HTTP request schedulers change the queuing behavior of in-flight +requests. A queue has two generic properties: a limit of requests and +a concurrency level. The limit of request can be unlimited (unbounded +queue), or limited (bounded queue). The concurrency level is either +limited or unlimited.

+

The default scheduler is an unbounded first in first out (FIFO) queue, +that is provided by Go’s standard library.

+

Skipper provides 2 last in first out (LIFO) filters to change the +scheduling behavior.

+

On failure conditions, Skipper will return HTTP status code:

+
    +
  • 503 if the queue is full, which is expected on the route with a failing backend
  • +
  • 502 if queue access times out, because the queue access was not fast enough
  • +
  • 500 on unknown errors, please create an issue
  • +
+

The problem

+

Why should you use boundaries to limit concurrency level and limit the +queue?

+

The short answer is resiliency. If you have one route, that is timing +out, the request queue of skipper will pile up and consume much more +memory, than before. This can lead to out of memory kill, which will +affect all other routes. In this +comment +you can see the memory usage increased in Go’s +standard library bufio package.

+

Why LIFO queue instead of FIFO queue?

+

In normal cases the queue should not contain many requests. Skipper is +able to process many requests concurrently without letting the queue +pile up. In overrun situations you might want to process at least +some fraction of requests instead of timing out all requests. LIFO +would not time out all requests within the queue, if the backend is +capable of responding to some requests fast enough.

+

A solution

+

Skipper has two filters lifo() and +lifoGroup(), that can limit +the number of requests for a route. A documented load +test +shows the behavior with an enabled lifo(100,100,"10s") filter for +all routes, that was added by default. You can do this, if you pass +the following flag to skipper: +-default-filters-prepend=lifo(100,100,"10s").

+

Both LIFO filters will use a last in first out queue to handle most +requests fast. If skipper is in an overrun mode, it will serve some +requests fast and some will time out. The idea is based on Dropbox +bandaid proxy, which is not open source. Dropbox +shared their idea in a public +blogpost.

+

Skipper’s scheduler implementation makes sure, that one route will not +interfere with other routes, if these routes are not in the same +scheduler group. LifoGroup has +a user chosen scheduler group and +lifo() will get a per route unique +scheduler group.

+

URI standards interpretation

+

Considering the following request path: /foo%2Fbar, Skipper can handle +it in two different ways. The current default way is that when the +request is parsed purely relying on the Go stdlib url package, this +path becomes /foo/bar. According to RFC 2616 and RFC 3986, this may +be considered wrong, and this path should be parsed as /foo%2Fbar. +This is possible to achieve centrally, when Skipper is started with +the -rfc-patch-path flag. It is also possible to allow the default +behavior and only force the alternative interpretation on a per-route +basis with the rfcPath() filter. See +rfcPath().

+

If the second interpretation gets considered the right way, and the +other one a bug, then the default value for this flag may change to +be on.

+

Debugging Requests

+

Skipper provides filters, that can change +HTTP requests. You might want to inspect how the request was changed, +during the route processing and check the request that would be made +to the backend. Luckily with -debug-listener=:9922, Skipper can +provide you this information.

+

For example you have the following route:

+
kube_default__foo__foo_teapot_example_org_____foo: Host(/^foo[.]teapot[.]example[.]org$/) && PathSubtree("/")
+  -> setRequestHeader("X-Foo", "hello-world")
+  -> <roundRobin, "http://10.2.0.225:9090", "http://10.2.1.244:9090">;
+
+

If you now send a request to the debug listener that will be matched +by the route, Skipper will respond with information that shows you the +matched route, the incoming request, the transformed request and all +predicates and filters involved in the route processing:

+
% curl -s http://127.0.0.1:9922/ -H"Host: foo.teapot.example.org" | jq .
+{
+  "route_id": "kube_default__foo__foo_teapot_example_org_____foo",
+  "route": "Host(/^foo[.]teapot[.]example[.]org$/) && PathSubtree(\"/\") -> setRequestHeader(\"X-Foo\", \"hello-world\") -> <roundRobin, \"http://10.2.0.225:9090\", \"http://10.2.1.244:9090\">",
+  "incoming": {
+    "method": "GET",
+    "uri": "/",
+    "proto": "HTTP/1.1",
+    "header": {
+      "Accept": [
+        "*/*"
+      ],
+      "User-Agent": [
+        "curl/7.49.0"
+      ]
+    },
+    "host": "foo.teapot.example.org",
+    "remote_address": "127.0.0.1:32992"
+  },
+  "outgoing": {
+    "method": "GET",
+    "uri": "",
+    "proto": "HTTP/1.1",
+    "header": {
+      "Accept": [
+        "*/*"
+      ],
+      "User-Agent": [
+        "curl/7.49.0"
+      ],
+      "X-Foo": [
+        "hello-world"
+      ]
+    },
+    "host": "foo.teapot.example.org"
+  },
+  "response_mod": {
+    "header": {
+      "Server": [
+        "Skipper"
+      ]
+    }
+  },
+  "filters": [
+    {
+      "name": "setRequestHeader",
+      "args": [
+        "X-Foo",
+        "hello-world"
+      ]
+    }
+  ],
+  "predicates": [
+    {
+      "name": "PathSubtree",
+      "args": [
+        "/"
+      ]
+    }
+  ]
+}
+
+

Profiling

+

Go profiling is explained in Go’s +diagnostics documentation.

+

Profiling skipper or RouteSRV

+

To enable profiling in skipper you have to use -enable-profile. This +will start a profiling route at /debug/pprof/profile on the support +listener, which defaults to :9911.

+

Profiling example

+

Start skipper with enabled profiling:

+
skipper -inline-routes='r1: * -> inlineContent("hello") -> <shunt>' -enable-profile
+
+

Use Go tool pprof to download profiling sample to analyze (sample is not from the example):

+
% go tool pprof http://127.0.0.1:9911
+Fetching profile over HTTP from http://127.0.0.1:9911/debug/pprof/profile
+Saved profile in /$HOME/pprof/pprof.skipper.samples.cpu.004.pb.gz
+File: skipper
+Build ID: 272c31a7bd60c9fabb637bdada37a3331a919b01
+Type: cpu
+Time: Oct 7, 2020 at 6:17pm (CEST)
+Duration: 30s, Total samples = 0
+No samples were found with the default sample value type.
+Try "sample_index" command to analyze different sample values.
+Entering interactive mode (type "help" for commands, "o" for options)
+(pprof) top
+Showing nodes accounting for 2140ms, 50.00% of 4280ms total
+Dropped 330 nodes (cum <= 21.40ms)
+Showing top 10 nodes out of 303
+      flat  flat%   sum%        cum   cum%
+     560ms 13.08% 13.08%      640ms 14.95%  syscall.Syscall
+     420ms  9.81% 22.90%      430ms 10.05%  runtime.nanotime
+     410ms  9.58% 32.48%      410ms  9.58%  runtime.futex
+     170ms  3.97% 36.45%      450ms 10.51%  runtime.mallocgc
+     170ms  3.97% 40.42%      180ms  4.21%  runtime.walltime
+     160ms  3.74% 44.16%      220ms  5.14%  runtime.scanobject
+      80ms  1.87% 46.03%       80ms  1.87%  runtime.heapBitsSetType
+      70ms  1.64% 47.66%       70ms  1.64%  runtime.epollwait
+      50ms  1.17% 48.83%      120ms  2.80%  compress/flate.(*compressor).deflate
+      50ms  1.17% 50.00%       50ms  1.17%  encoding/json.stateInString
+(pprof) web
+--> opens browser with SVG
+
+

pprof svg in web browser

+

Response serving

+

When serving a response from a backend, Skipper first serves the HTTP +response headers. After that Skipper streams the response payload +through a single 8kB buffer. It +uses Flush() to make sure each 8kB chunk is written to the client. +Details can be observed with opentracing in the logs of the Proxy Span.

+

Forwarded headers

+

Skipper can be configured to add X-Forwarded-* headers:

+
  -forwarded-headers value
+        comma separated list of headers to add to the incoming request before routing
+        X-Forwarded-For sets or appends with comma the remote IP of the request to the X-Forwarded-For header value
+        X-Forwarded-Host sets X-Forwarded-Host value to the request host
+        X-Forwarded-Port=<port> sets X-Forwarded-Port value
+        X-Forwarded-Proto=<http|https> sets X-Forwarded-Proto value
+  -forwarded-headers-exclude-cidrs value
+        disables addition of forwarded headers for the remote host IPs from the comma separated list of CIDRs
+
+

Converting Routes

+

For migrations you often need to convert X to Y. This is also true in +case you want to switch one predicate to another one or one filter to +another one. In skipper we have -edit-route and -clone-route that +either modify matching routes or copy matching routes and change the +copy.

+

Example:

+

A route with edit-route +

% skipper -inline-routes='Path("/foo") -> setResponseHeader("X-Foo","bar") -> inlineContent("hi") -> <shunt>' \
+-edit-route='/inlineContent[(]["](.*)["][)]/inlineContent("modified \"$1\" response")/'
+[APP]INFO[0000] Expose metrics in codahale format
+[APP]INFO[0000] support listener on :9911
+[APP]INFO[0000] route settings, reset, route: : Path("/foo") -> setResponseHeader("X-Foo", "bar") -> inlineContent("hi") -> <shunt>
+[APP]INFO[0000] proxy listener on :9090
+[APP]INFO[0000] TLS settings not found, defaulting to HTTP
+[APP]INFO[0000] route settings received
+[APP]INFO[0000] route settings applied
+

+

Modified route: +

curl localhost:9911/routes
+Path("/foo")
+  -> setResponseHeader("X-Foo", "bar")
+  -> inlineContent("modified \"hi\" response")
+  -> <shunt>
+

+

Modified response body: +

% curl -v http://localhost:9090/foo
+*   Trying ::1...
+* Connected to localhost (::1) port 9090 (#0)
+> GET /foo HTTP/1.1
+> Host: localhost:9090
+> User-Agent: curl/7.49.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Content-Length: 22
+< Content-Type: text/plain; charset=utf-8
+< Server: Skipper
+< X-Foo: bar
+< Date: Thu, 14 Oct 2021 08:41:53 GMT
+<
+* Connection #0 to host localhost left intact
+modified "hi" response
+

+

With -edit-route and -clone-route you can modify Predicates and Filters to convert from +SourceFromLast() to ClientIP, for example if you want to migrate an +AWS cloud load balancer from Application Load Balancer to Network +Load Balancer, you can use +-clone-route='/SourceFromLast[(](.*)[)]/ClientIP($1)/' to create +additional routes for

+

r: SourceFromLast("9.0.0.0/8","2001:67c:20a0::/48") -> ...`
+
+to change to +
r: SourceFromLast("9.0.0.0/8","2001:67c:20a0::/48") -> ...`
+clone_r: ClientIP("9.0.0.0/8","2001:67c:20a0::/48") -> ...`
+
+for migration time.

+

The / symbol is not the only option for the separator for -edit-route and -clone-route; any +first symbol you specify in those options can be used as the separator. This could be useful +for IP mask changes, for example, you can use -edit-route='#/26#/24#'. In this case

r: SourceFromLast("9.0.0.0/26","2001:67c:20a0::/48") -> ...`
+
+will be changed to +
r: SourceFromLast("9.0.0.0/24","2001:67c:20a0::/48") -> ...`
+

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/reference/architecture/index.html b/reference/architecture/index.html new file mode 100644 index 0000000000..08b338d2da --- /dev/null +++ b/reference/architecture/index.html @@ -0,0 +1,1433 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Architecture - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Architecture

+

Skipper is written as a library and is also a multi binary project with +2 binaries, named skipper and eskip. Skipper is the HTTP proxy +and eskip is a CLI application to verify, print, update or delete +Skipper routes.

+

Skipper’s internal architecture is split into different packages. The +skipper package has connections to multiple dataclient, that pull +information from different sources, for example static routes from an +eskip file or dynamic routes from Kubernetes ingress objects.

+

The proxy package gets the routes populated by skipper and has +always a current routing table which will be replaced on change.

+

A route is one entry in the routing table. A route consists of one or +more predicates, which are used to find a route for a given HTTP +request. A route can also have one or more filters, which can modify +the content of the request or response. A route can point to a +backend, it can be a <shunt>, meaning that skipper serves the requests +for the route, a <loopback>, meaning that the requests will be +matched against the routing table again after filters have modified +them, or a <dynamic>, meaning that the target url can be set dynamically +by a filter (e.g. setDynamicBackendUrl).

+

Opentracing API is supported via +skipper-plugins. For +example Jaeger is supported.

+

Skipper has a rich set of metrics that are exposed as json, but can be +exported in Prometheus format.

+

Skipper's architecture

+

Route processing

+

Package skipper has a Go http.Server and does the ListenAndServe +call with the loggingHandler wrapped proxy. The loggingHandler +is basically a middleware for the proxy providing access logs and +both implement the plain Go http.Handler interface.

+

For each incoming http.Request the proxy will create a request +context and enhance it with an Opentracing API Span. +It will check proxy global ratelimits first and after that lookup the +route in the routing table. After that skipper will apply all request +filters, that can modify the http.Request. It will then check the +route local ratelimits, the circuitbreakers and do the backend +call. If the backend call got a TCP or TLS connection error in a +loadbalanced route, skipper will do a retry to another backend of that +loadbalanced group automatically. Just before the response to the +caller, skipper will process the response filters, that can change the +http.Response.

+

In two special cases, skipper doesn’t forward the request to the +backend. When the route is shunted (<shunt>), skipper serves the +request alone, by using only the filters. When the route is a +<loopback>, the request is passed to the routing table for finding +another route, based on the changes that the filters made to the +request.

+

Skipper's request and response processing

+

Routing mechanism

+

The routing executes the following steps in the typical case:

+
    +
  1. +

    Select the best fitting route by matching the request against the + predicates. When no route found, respond with 404 (unless the default + status code is configured to a different value).

    +
  2. +
  3. +

    Execute the filters defined in the route in normal order on the + request. The filters may or may not alter the request.

    +
  4. +
  5. +

    Forward the request to the backend defined by the route and receive + a response.

    +
  6. +
  7. +

    Execute the filters defined in the route in reverse order on the + response. The filters may or may not alter the response.

    +
  8. +
  9. +

    Respond to the incoming request with the resulting response.

    +
  10. +
+

Route matching

+

Skipper can handle a relatively large number of routes with acceptable +performance, while being able to use any attribute of the incoming HTTP +requests to distinguish between them. In order to be able to do so, the +path matching predicates (Path() and PathSubtree() but not PathRegexp()) +have a special role during route matching, which is a tradeoff by +design, and needs to be kept in mind to understand in some cases why a +certain route was matched for a request instead of another.

+

The route matching logic can be summed up as follows:

+
    +
  1. +

    Lookup in the path tree based on the Path() and the PathSubtree() + predicates, using the path component of the incoming request’s URI. Then + the remaining predicates of the found route(s) are evaluated.

    +
      +
    • +

      the path lookup is a radix tree with O(log(n)) time complexity

      +
    • +
    • +

      in case of intersecting paths, the more specific path is matched in + the tree

      +
    • +
    • +

      PathRegexp() is not used in the tree, but it is evaluated only after + Path() or PathSubtree(), just like e.g. Method() or Host().

      +
    • +
    +
  2. +
  3. +

    If step #1 matches multiple routes, which means there are multiple + routes in the same position of the path tree, and all other predicates + match the request, too, then the route with the highest + weight is matched.

    +
      +
    • +

      this is an O(n) lookup, but only on the same leaf

      +
    • +
    • +

      the root of the tree is considered a single leaf, so if not using the + Path() or PathSubtree() predicates, the entire lookup will become O(n) + over all the routes.

      +
    • +
    +
  4. +
  5. +

    If #2 results in multiple matching routes, then one route will be + selected. It is unspecified which one.

    +
  6. +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/reference/backends/index.html b/reference/backends/index.html new file mode 100644 index 0000000000..92c551e438 --- /dev/null +++ b/reference/backends/index.html @@ -0,0 +1,1638 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Backends - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Backends

+ +

A backend is the last part of a route and will define the backend to +call for a given request that matches the route.

+

Generic route example:

+
routeID: Predicate1 && Predicate2 -> filter1 -> filter2 -> <backend>;
+
+

Network backend

+

A network backend is an arbitrary HTTP or HTTPS URL, that will be +called by the proxy.

+

Route example with a network backend "https://www.zalando.de/": +

r0: Method("GET")
+    -> setRequestHeader("X-Passed-Skipper", "true")
+    -> "https://www.zalando.de/";
+

+

Proxy example with request in access log +

./bin/skipper -inline-routes 'r0: Method("GET") -> setRequestHeader("X-Passed-Skipper", "true") -> "https://www.zalando.de/";'
+[APP]INFO[0000] Expose metrics in codahale format
+[APP]INFO[0000] support listener on :9911
+[APP]INFO[0000] proxy listener on :9090
+[APP]INFO[0000] TLS settings not found, defaulting to HTTP
+[APP]INFO[0000] route settings, reset, route: r0: Method("GET") -> setRequestHeader("X-Passed-Skipper", "true") -> "https://www.zalando.de/"
+[APP]INFO[0000] route settings received
+[APP]INFO[0000] route settings applied
+
+::1 - - [05/Feb/2019:14:31:05 +0100] "GET / HTTP/1.1" 200 164408 "-" "curl/7.49.0" 457 localhost:9090 - -
+

+

Client example with request and response headers: +

$ curl -v localhost:9090 >/dev/null
+* Rebuilt URL to: localhost:9090/
+*   Trying ::1...
+* Connected to localhost (::1) port 9090 (#0)
+> GET / HTTP/1.1
+> Host: localhost:9090
+> User-Agent: curl/7.49.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Cache-Control: no-cache, no-store, must-revalidate
+< Content-Type: text/html
+< Date: Tue, 05 Feb 2019 13:31:38 GMT
+< Link: <https://mosaic01.ztat.net/base-assets/require-2.1.22.min.js>; rel="preload"; as="script"; nopush; crossorigin
+< Pragma: no-cache
+< Server: Skipper
+< Set-Cookie: ...; Path=/; Domain=zalando.de; Expires=Sun, 04 Aug 2019 13:31:38 GMT; Max-Age=15552000; HttpOnly; Secure
+< Vary: Accept-Encoding
+< Transfer-Encoding: chunked
+<
+{ [3205 bytes data]
+

+

Shunt backend

+

A shunt backend, <shunt>, will not call a backend, but reply directly from the +proxy itself. This can be used to shortcut, for example have a default +that replies with 404 or use skipper as a backend serving static +content in demos.

+

Route Example proxying to "https://www.zalando.de/" in case Host +header is set to "zalando" and rest will be served HTTP status code +404 with the body "no matching route":

+
r0: Host("zalando")
+    -> "https://www.zalando.de/";
+rest: *
+      -> status(404)
+      -> inlineContent("no matching route")
+      -> <shunt>;
+
+

Proxy configured as defined above with access log showing 404: +

$ ./bin/skipper -inline-routes 'r0: Host("zalando") -> "https://www.zalando.de/"; rest: * -> status(404) -> inlineContent("no matching route")  -> "http://localhost:9999/";'
+[APP]INFO[0000] Expose metrics in codahale format
+[APP]INFO[0000] support listener on :9911
+[APP]INFO[0000] proxy listener on :9090
+[APP]INFO[0000] TLS settings not found, defaulting to HTTP
+[APP]INFO[0000] route settings, reset, route: r0: Host(/zalando/) -> "https://www.zalando.de/"
+[APP]INFO[0000] route settings, reset, route: rest: * -> status(404) -> inlineContent("no matching route") -> "http://localhost:9999/"
+[APP]INFO[0000] route settings received
+[APP]INFO[0000] route settings applied
+::1 - - [05/Feb/2019:14:39:26 +0100] "GET / HTTP/1.1" 404 17 "-" "curl/7.49.0" 0 localhost:9090 - -
+

+

Client example with request and response headers: +

$ curl -sv localhost:9090
+* Rebuilt URL to: localhost:9090/
+*   Trying ::1...
+* Connected to localhost (::1) port 9090 (#0)
+> GET / HTTP/1.1
+> Host: localhost:9090
+> User-Agent: curl/7.49.0
+> Accept: */*
+>
+< HTTP/1.1 404 Not Found
+< Content-Length: 17
+< Content-Type: text/plain; charset=utf-8
+< Server: Skipper
+< Date: Tue, 05 Feb 2019 13:37:27 GMT
+<
+* Connection #0 to host localhost left intact
+no matching route
+

+

Loopback backend

+

The loopback backend, <loopback>, will look up the routing +table again for a better matching route after processing the current route. +This way you can add some headers or change the request path for some +specific matching requests.

+

Example:

+
    +
  • Route r0 is a route with loopback backend that will be matched for requests with paths that start with /api. The route will modify the http request removing /api in the path of the incoming request. In the second step of the routing the modified request will be matched by route r1.
  • +
  • Route r1 is a default route with a network backend to call "https://www.zalando.de/"
  • +
+
r0: PathSubtree("/api")
+    -> modPath("^/api", "")
+    -> <loopback>;
+r1: * -> "https://www.zalando.de/";
+
+

Proxy configured as defined above with access logs showing 404 for the first call and 200 for the second: +

$ ./bin/skipper -inline-routes 'r0: PathSubtree("/api") -> setRequestHeader("X-Passed-Skipper", "true") -> modPath(/^\/api/, "") -> <loopback>;
+r1: * -> "https://www.zalando.de/";'
+[APP]INFO[0000] Expose metrics in codahale format
+[APP]INFO[0000] route settings, reset, route: r0: PathSubtree("/api") -> setRequestHeader("X-Passed-Skipper", "true") -> modPath("^/api", "") -> <loopback>
+[APP]INFO[0000] route settings, reset, route: r1: * -> "https://www.zalando.de/"
+[APP]INFO[0000] route settings received
+[APP]INFO[0000] route settings applied
+[APP]INFO[0000] support listener on :9911
+[APP]INFO[0000] proxy listener on :9090
+[APP]INFO[0000] TLS settings not found, defaulting to HTTP
+::1 - - [05/Feb/2019:14:54:14 +0100] "GET /api/foo HTTP/1.1" 404 98348 "-" "curl/7.49.0" 562 localhost:9090 - -
+::1 - - [05/Feb/2019:14:54:28 +0100] "GET /api HTTP/1.1" 200 164408 "-" "curl/7.49.0" 120 localhost:9090 - -
+

+

Client example with request and response headers: +

$ curl -sv localhost:9090/api/foo >/dev/null
+*   Trying ::1...
+* Connected to localhost (::1) port 9090 (#0)
+> GET /api/foo HTTP/1.1
+> Host: localhost:9090
+> User-Agent: curl/7.49.0
+> Accept: */*
+>
+< HTTP/1.1 404 Not Found
+< Content-Language: de-DE
+< Content-Type: text/html;charset=UTF-8
+< Date: Tue, 05 Feb 2019 14:00:33 GMT
+< Transfer-Encoding: chunked
+<
+{ [2669 bytes data]
+* Connection #0 to host localhost left intact
+
+
+$ curl -sv localhost:9090/api >/dev/null
+*   Trying ::1...
+* Connected to localhost (::1) port 9090 (#0)
+> GET /api HTTP/1.1
+> Host: localhost:9090
+> User-Agent: curl/7.49.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Cache-Control: no-cache, no-store, must-revalidate
+< Content-Type: text/html
+< Date: Tue, 05 Feb 2019 14:00:44 GMT
+< Link: <https://mosaic01.ztat.net/base-assets/require-2.1.22.min.js>; rel="preload"; as="script"; nopush; crossorigin
+< Transfer-Encoding: chunked
+<
+{ [3491 bytes data]
+

+

If the request processing reaches the maximum number of loopbacks (by default max=9), the routing will +result in an error.

+

Dynamic backend

+

The dynamic backend, <dynamic>, will get the backend to call from data +provided by filters. This allows skipper library users to do proxy +calls to a certain target from their own implementation, dynamically +looked up by their filters.

+

Example shows how to set a target by a provided filter, which would be similar to a network backend:

+
r0: * -> setDynamicBackendUrl("https://www.zalando.de") -> <dynamic>;
+
+

Proxy configured as defined above with access logs showing 200 for the call: +

$ ./bin/skipper -inline-routes 'r0: * -> setDynamicBackendUrl("https://www.zalando.de") -> <dynamic>;'
+[APP]INFO[0000] Expose metrics in codahale format
+[APP]INFO[0000] support listener on :9911
+[APP]INFO[0000] proxy listener on :9090
+[APP]INFO[0000] TLS settings not found, defaulting to HTTP
+[APP]INFO[0000] route settings, reset, route: r0: * -> setDynamicBackendUrl("https://www.zalando.de") -> <dynamic>
+[APP]INFO[0000] route settings received
+[APP]INFO[0000] route settings applied
+::1 - - [05/Feb/2019:15:09:34 +0100] "GET / HTTP/1.1" 200 164408 "-" "curl/7.49.0" 132 localhost:9090 - -
+

+

Client example with request and response headers: +

$ curl -sv localhost:9090/ >/dev/null
+*   Trying ::1...
+* Connected to localhost (::1) port 9090 (#0)
+> GET / HTTP/1.1
+> Host: localhost:9090
+> User-Agent: curl/7.49.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Cache-Control: no-cache, no-store, must-revalidate
+< Content-Type: text/html
+< Date: Tue, 05 Feb 2019 14:09:34 GMT
+< Link: <https://mosaic01.ztat.net/base-assets/require-2.1.22.min.js>; rel="preload"; as="script"; nopush; crossorigin
+< Pragma: no-cache
+< Server: Skipper
+< Transfer-Encoding: chunked
+<
+{ [3491 bytes data]
+* Connection #0 to host localhost left intact
+

+

When no filters modifying the target are set (e.g. r0: * -> <dynamic>;), the +target host defaults to either the Host header or the host name given in the +URL, and the target scheme defaults to either https when TLS is +configured or http when TLS is not configured.

+

Load Balancer backend

+

The loadbalancer backend, <$algorithm, "backend1", "backend2">, will +balance the load across all given backends using the algorithm set in +$algorithm. If $algorithm is not specified it will use the default +algorithm set by Skipper at start.

+

Current implemented algorithms:

+ +

All algorithms except powerOfRandomNChoices support fadeIn filter.

+

Route example with 2 backends and the roundRobin algorithm: +

r0: * -> <roundRobin, "http://127.0.0.1:9998", "http://127.0.0.1:9997">;
+

+

Route example with 2 backends and the random algorithm: +

r0: * -> <random, "http://127.0.0.1:9998", "http://127.0.0.1:9997">;
+

+

Route example with 2 backends and the consistentHash algorithm: +

r0: * -> <consistentHash, "http://127.0.0.1:9998", "http://127.0.0.1:9997">;
+

+

Route example with 2 backends and the powerOfRandomNChoices algorithm: +

r0: * -> <powerOfRandomNChoices, "http://127.0.0.1:9998", "http://127.0.0.1:9997">;
+

+

Proxy with roundRobin loadbalancer and two backends: +

$ ./bin/skipper -inline-routes 'r0: *  -> <roundRobin, "http://127.0.0.1:9998", "http://127.0.0.1:9997">;'
+[APP]INFO[0000] Expose metrics in codahale format
+[APP]INFO[0000] route settings, reset, route: r0: * -> <roundRobin, "http://127.0.0.1:9998", "http://127.0.0.1:9997">
+[APP]INFO[0000] support listener on :9911
+[APP]INFO[0000] route settings received
+[APP]INFO[0000] proxy listener on :9090
+[APP]INFO[0000] TLS settings not found, defaulting to HTTP
+[APP]INFO[0000] route settings applied
+::1 - - [05/Feb/2019:15:39:06 +0100] "GET / HTTP/1.1" 200 1 "-" "curl/7.49.0" 3 localhost:9090 - -
+::1 - - [05/Feb/2019:15:39:07 +0100] "GET / HTTP/1.1" 200 1 "-" "curl/7.49.0" 0 localhost:9090 - -
+::1 - - [05/Feb/2019:15:39:08 +0100] "GET / HTTP/1.1" 200 1 "-" "curl/7.49.0" 0 localhost:9090 - -
+::1 - - [05/Feb/2019:15:39:09 +0100] "GET / HTTP/1.1" 200 1 "-" "curl/7.49.0" 0 localhost:9090 - -
+

+

Backend1 returns “A” in the body: +

$ ./bin/skipper -address=":9998" -inline-routes 'r0: * -> inlineContent("A") -> <shunt>;'
+[APP]INFO[0000] Expose metrics in codahale format
+[APP]INFO[0000] support listener on :9911
+[APP]INFO[0000] proxy listener on :9998
+[APP]INFO[0000] TLS settings not found, defaulting to HTTP
+[APP]INFO[0000] route settings, reset, route: r0: * -> inlineContent("A") -> <shunt>
+[APP]INFO[0000] route settings received
+[APP]INFO[0000] route settings applied
+127.0.0.1 - - [05/Feb/2019:15:39:06 +0100] "GET / HTTP/1.1" 200 1 "-" "curl/7.49.0" 0 127.0.0.1:9998 - -
+127.0.0.1 - - [05/Feb/2019:15:39:08 +0100] "GET / HTTP/1.1" 200 1 "-" "curl/7.49.0" 0 127.0.0.1:9998 - -
+

+

Backend2 returns “B” in the body: +

$ ./bin/skipper -address=":9997" -inline-routes 'r0: * -> inlineContent("B") -> <shunt>;'
+[APP]INFO[0000] Expose metrics in codahale format
+[APP]INFO[0000] support listener on :9911
+[APP]INFO[0000] proxy listener on :9997
+[APP]INFO[0000] route settings, reset, route: r0: * -> inlineContent("B") -> <shunt>
+[APP]INFO[0000] TLS settings not found, defaulting to HTTP
+[APP]INFO[0000] route settings received
+[APP]INFO[0000] route settings applied
+127.0.0.1 - - [05/Feb/2019:15:39:07 +0100] "GET / HTTP/1.1" 200 1 "-" "curl/7.49.0" 0 127.0.0.1:9997 - -
+127.0.0.1 - - [05/Feb/2019:15:39:09 +0100] "GET / HTTP/1.1" 200 1 "-" "curl/7.49.0" 0 127.0.0.1:9997 - -
+

+

Client: +

$ curl -s http://localhost:9090/
+A
+$ curl -s http://localhost:9090/
+B
+$ curl -s http://localhost:9090/
+A
+$ curl -s http://localhost:9090/
+B
+

+

Backend Protocols

+

Current implemented protocols:

+
    +
  • http: (default) http protocol
  • +
  • fastcgi: (experimental) directly connect Skipper with a FastCGI backend like PHP FPM.
  • +
+

Route example that uses FastCGI (experimental): +

php: * -> setFastCgiFilename("index.php") -> "fastcgi://127.0.0.1:9000";
+php_lb: * -> setFastCgiFilename("index.php") -> <roundRobin, "fastcgi://127.0.0.1:9000", "fastcgi://127.0.0.1:9001">;
+

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/reference/development/index.html b/reference/development/index.html new file mode 100644 index 0000000000..d261b29b7f --- /dev/null +++ b/reference/development/index.html @@ -0,0 +1,1794 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Development - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+ +
+ + + +
+
+ + + + + + + +

Development

+ +

How to develop a Filter

+

A filter is part of a route and can change arbitrary http data in the +http.Request and http.Response path of a proxy.

+

The filter example shows a non trivial diff of a filter +implementation, that implements an authnz webhook. It shows global +settings passed via flags, user documentation, developer documentation +for library users, the filter implementation and some test +cases. Tests should test the actual filter implementation in a proxy +setup.

+

How to pass options to your filter

+

Set a default and a Usage string as const. Add a var to hold the +value and put the flag to the category, that makes the most sense.

+

If a filter, predicate or dataclient need Options passed from flags, +then you should register the filter in skipper.go, the main library +entrypoint. In case you do not need options from flags, use +MakeRegistry() in ./filters/builtin/builtin.go to register your +filter.

+
diff --git a/cmd/skipper/main.go b/cmd/skipper/main.go
+index 28f18f9..4530b85 100644
+--- a/cmd/skipper/main.go
++++ b/cmd/skipper/main.go
+@@ -59,9 +59,10 @@ const (
+    defaultOAuthTokeninfoTimeout          = 2 * time.Second
+    defaultOAuthTokenintrospectionTimeout = 2 * time.Second
++   defaultWebhookTimeout                 = 2 * time.Second
+
+    // generic:
+    addressUsage                         = "network address that skipper should listen on"
+@@ -141,6 +142,8 @@ const (
+    oauth2TokeninfoURLUsage              = "sets the default tokeninfo URL to query information about an incoming OAuth2 token in oauth2Tokeninfo filters"
+    oauth2TokeninfoTimeoutUsage          = "sets the default tokeninfo request timeout duration to 2000ms"
+    oauth2TokenintrospectionTimeoutUsage = "sets the default tokenintrospection request timeout duration to 2000ms"
++   webhookTimeoutUsage                  = "sets the webhook request timeout duration, defaults to 2s"
++
+    // connections, timeouts:
+    idleConnsPerHostUsage           = "maximum idle connections per backend host"
+    closeIdleConnsPeriodUsage       = "period of closing all idle connections in seconds or as a duration string. Not closing when less than 0"
+@@ -243,13 +246,14 @@ var (
+    oauth2TokeninfoURL              string
+    oauth2TokeninfoTimeout          time.Duration
+    oauth2TokenintrospectionTimeout time.Duration
++   webhookTimeout                  time.Duration
+
+    // connections, timeouts:
+    idleConnsPerHost           int
+@@ -351,13 +355,14 @@ func init() {
+    flag.DurationVar(&oauth2TokeninfoTimeout, "oauth2-tokeninfo-timeout", defaultOAuthTokeninfoTimeout, oauth2TokeninfoTimeoutUsage)
+    flag.DurationVar(&oauth2TokenintrospectionTimeout, "oauth2-tokenintrospect-timeout", defaultOAuthTokenintrospectionTimeout, oauth2TokenintrospectionTimeoutUsage)
++   flag.DurationVar(&webhookTimeout, "webhook-timeout", defaultWebhookTimeout, webhookTimeoutUsage)
+
+    // connections, timeouts:
+    flag.IntVar(&idleConnsPerHost, "idle-conns-num", proxy.DefaultIdleConnsPerHost, idleConnsPerHostUsage)
+@@ -536,13 +541,14 @@ func main() {
+        OAuthTokeninfoURL:              oauth2TokeninfoURL,
+        OAuthTokeninfoTimeout:          oauth2TokeninfoTimeout,
+        OAuthTokenintrospectionTimeout: oauth2TokenintrospectionTimeout,
++       WebhookTimeout:                 webhookTimeout,
+
+        // connections, timeouts:
+        IdleConnectionsPerHost:     idleConnsPerHost,
+
+diff --git a/skipper.go b/skipper.go
+index 10d5769..da46fe0 100644
+--- a/skipper.go
++++ b/skipper.go
+@@ -443,6 +443,9 @@ type Options struct {
+    // OAuthTokenintrospectionTimeout sets timeout duration while calling oauth tokenintrospection service
+    OAuthTokenintrospectionTimeout time.Duration
+
++   // WebhookTimeout sets timeout duration while calling a custom webhook auth service
++   WebhookTimeout time.Duration
++
+    // MaxAuditBody sets the maximum read size of the body read by the audit log filter
+    MaxAuditBody int
+ }
+@@ -677,7 +680,8 @@ func Run(o Options) error {
+        auth.NewOAuthTokenintrospectionAnyClaims(o.OAuthTokenintrospectionTimeout),
+        auth.NewOAuthTokenintrospectionAllClaims(o.OAuthTokenintrospectionTimeout),
+        auth.NewOAuthTokenintrospectionAnyKV(o.OAuthTokenintrospectionTimeout),
+-       auth.NewOAuthTokenintrospectionAllKV(o.OAuthTokenintrospectionTimeout))
++       auth.NewOAuthTokenintrospectionAllKV(o.OAuthTokenintrospectionTimeout),
++       auth.NewWebhook(o.WebhookTimeout))
+
+    // create a filter registry with the available filter specs registered,
+    // and register the custom filters
+
+

User documentation

+

Documentation for users should be done in docs/.

+
diff --git a/docs/filters.md b/docs/filters.md
+index d3bb872..a877062 100644
+--- a/docs/filters.md
++++ b/docs/filters.md
+@@ -382,6 +382,24 @@ basicAuth("/path/to/htpasswd")
+ basicAuth("/path/to/htpasswd", "My Website")
+ ```
+
++## webhook
++
++The `webhook` filter makes it possible to have your own authentication and
++authorization endpoint as a filter.
++
++Headers from the incoming request will be copied into the request that
++is being done to the webhook endpoint. Responses from the webhook with
++status code less than 300 will be authorized, rest unauthorized.
++
++Examples:
++
++```
++webhook("https://custom-webhook.example.org/auth")
++```
++
++The webhook timeout has a default of 2 seconds and can be globally
++changed, if skipper is started with `-webhook-timeout=2s` flag.
++
+ ## oauthTokeninfoAnyScope
+
+ If skipper is started with `-oauth2-tokeninfo-url` flag, you can use
+
+

Add godoc

+

Godoc is meant for developers using skipper as a library; use the package's doc.go to document generic functionality and library usage.

+
diff --git a/filters/auth/doc.go b/filters/auth/doc.go
+index 696d3fd..1d6e3a8 100644
+--- a/filters/auth/doc.go
++++ b/filters/auth/doc.go
+@@ -318,5 +318,12 @@ filter after the auth filter.
+     a: Path("/only-allowed-audit-log") -> oauthTokeninfoAnyScope("bar-w") -> auditLog() -> "https://internal.example.org/";
+     b: Path("/all-access-requests-audit-log") -> auditLog() -> oauthTokeninfoAnyScope("foo-r") -> "https://internal.example.org/";
+
++Webhook - webhook() filter
++
++The filter webhook allows you to have a custom authentication and
++authorization endpoint for a route.
++
++    a: Path("/only-allowed-by-webhook") -> webhook("https://custom-webhook.example.org/auth") -> "https://protected-backend.example.org/";
++
+ */
+ package auth
+
+

Filter implementation

+

A filter can modify the incoming http.Request before calling the backend and the outgoing http.Response from the backend to the client.

+

A filter consists of at least two types: a spec and a filter. The spec consists of everything that is needed and known before a user will instantiate a filter.

+

A spec will be created in the bootstrap procedure of a skipper process. A spec has to satisfy the Spec interface: Name() string and CreateFilter([]interface{}) (filters.Filter, error).

+

The actual filter implementation has to satisfy the Filter interface: Request(filters.FilterContext) and Response(filters.FilterContext).

+
diff --git a/filters/auth/webhook.go b/filters/auth/webhook.go
+new file mode 100644
+index 0000000..f0632a6
+--- /dev/null
++++ b/filters/auth/webhook.go
+@@ -0,0 +1,84 @@
++package auth
++
++import (
++   "net/http"
++   "time"
++
++   "github.com/zalando/skipper/filters"
++)
++
++const (
++   WebhookName = "webhook"
++)
++
++type (
++   webhookSpec struct {
++       Timeout time.Duration
++   }
++   webhookFilter struct {
++       authClient *authClient
++   }
++)
++
++// NewWebhook creates a new auth filter specification
++// to validate authorization for requests.
++func NewWebhook(d time.Duration) filters.Spec {
++   return &webhookSpec{Timeout: d}
++}
++
++func (*webhookSpec) Name() string {
++   return WebhookName
++}
++
++// CreateFilter creates an auth filter. The first argument is an URL
++// string.
++//
++//     s.CreateFilter("https://my-auth-service.example.org/auth")
++//
++func (ws *webhookSpec) CreateFilter(args []interface{}) (filters.Filter, error) {
++   if l := len(args); l == 0 || l > 2 {
++       return nil, filters.ErrInvalidFilterParameters
++   }
++
++   s, ok := args[0].(string)
++   if !ok {
++       return nil, filters.ErrInvalidFilterParameters
++   }
++
++   ac, err := newAuthClient(s, ws.Timeout)
++   if err != nil {
++       return nil, filters.ErrInvalidFilterParameters
++   }
++
++   return &webhookFilter{authClient: ac}, nil
++}
++
++func copyHeader(to, from http.Header) {
++   for k, v := range from {
++       to[http.CanonicalHeaderKey(k)] = v
++   }
++}
++
++func (f *webhookFilter) Request(ctx filters.FilterContext) {
++   statusCode, err := f.authClient.getWebhook(ctx.Request())
++   if err != nil {
++       unauthorized(ctx, WebhookName, authServiceAccess, f.authClient.url.Hostname())
++   }
++   // redirects, auth errors, webhook errors
++   if statusCode >= 300 {
++       unauthorized(ctx, WebhookName, invalidAccess, f.authClient.url.Hostname())
++   }
++   authorized(ctx, WebhookName)
++}
++
++func (*webhookFilter) Response(filters.FilterContext) {}
+
+

Writing tests

+

Skipper uses normal table driven Go tests without frameworks.

+

This example filter test creates a backend, an auth service to be called by our filter, and a filter configured by our table-driven test.

+

In general we use real backends with dynamic port allocation. We call these and inspect the http.Response to check if we get the expected results for invalid and valid data.

+

Skipper has some helpers to create the test proxy in the proxytest package. Backends can be created with httptest.NewServer as in the example below.

+
diff --git a/filters/auth/webhook_test.go b/filters/auth/webhook_test.go
+new file mode 100644
+index 0000000..d43c4ea
+--- /dev/null
++++ b/filters/auth/webhook_test.go
+@@ -0,0 +1,128 @@
++package auth
++
++import (
++   "fmt"
++   "io"
++   "net/http"
++   "net/http/httptest"
++   "net/url"
++   "testing"
++   "time"
++
++   "github.com/zalando/skipper/eskip"
++   "github.com/zalando/skipper/filters"
++   "github.com/zalando/skipper/proxy/proxytest"
++)
++
++func TestWebhook(t *testing.T) {
++   for _, ti := range []struct {
++       msg        string
++       token      string
++       expected   int
++       authorized bool
++       timeout    bool
++   }{{
++       msg:        "invalid-token-should-be-unauthorized",
++       token:      "invalid-token",
++       expected:   http.StatusUnauthorized,
++       authorized: false,
++   }, {
++       msg:        "valid-token-should-be-authorized",
++       token:      testToken,
++       expected:   http.StatusOK,
++       authorized: true,
++   }, {
++       msg:        "webhook-timeout-should-be-unauthorized",
++       token:      testToken,
++       expected:   http.StatusUnauthorized,
++       authorized: false,
++       timeout:    true,
++   }} {
++       t.Run(ti.msg, func(t *testing.T) {
++           backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
++               w.WriteHeader(http.StatusOK)
++               io.WriteString(w, "Hello from backend")
++               return
++           }))
++           defer backend.Close()
++
++           authServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
++               if ti.timeout {
++                   time.Sleep(time.Second + time.Millisecond)
++               }
++
++               if r.Method != "GET" {
++                   w.WriteHeader(489)
++                   io.WriteString(w, "FAIL - not a GET request")
++                   return
++               }
++
++               tok := r.Header.Get(authHeaderName)
++               tok = tok[len(authHeaderPrefix):len(tok)]
++               switch tok {
++               case testToken:
++                   w.WriteHeader(200)
++                   fmt.Fprintln(w, "OK - Got token: "+tok)
++                   return
++               }
++               w.WriteHeader(402)                            //http.StatusUnauthorized)
++               fmt.Fprintln(w, "Unauthorized - Got token: ") //+tok)
++           }))
++           defer authServer.Close()
++
++           spec := NewWebhook(time.Second)
++
++           args := []interface{}{
++               "http://" + authServer.Listener.Addr().String(),
++           }
++           f, err := spec.CreateFilter(args)
++           if err != nil {
++               t.Errorf("error in creating filter for %s: %v", ti.msg, err)
++               return
++           }
++
++           f2 := f.(*webhookFilter)
++           defer f2.Close()
++
++           fr := make(filters.Registry)
++           fr.Register(spec)
++           r := &eskip.Route{Filters: []*eskip.Filter{{Name: spec.Name(), Args: args}}, Backend: backend.URL}
++
++           proxy := proxytest.New(fr, r)
++           defer proxy.Close()
++
++           reqURL, err := url.Parse(proxy.URL)
++           if err != nil {
++               t.Errorf("Failed to parse url %s: %v", proxy.URL, err)
++               return
++           }
++
++           req, err := http.NewRequest("GET", reqURL.String(), nil)
++           if err != nil {
++               t.Errorf("failed to create request %v", err)
++               return
++           }
++           req.Header.Set(authHeaderName, authHeaderPrefix+ti.token)
++
++           rsp, err := http.DefaultClient.Do(req)
++           if err != nil {
++               t.Errorf("failed to get response: %v", err)
++               return
++           }
++           defer rsp.Body.Close()
++
++           buf := make([]byte, 128)
++           var n int
++           if n, err = rsp.Body.Read(buf); err != nil && err != io.EOF {
++               t.Errorf("Could not read response body: %v", err)
++               return
++           }
++
++           t.Logf("%d %d", rsp.StatusCode, ti.expected)
++           if rsp.StatusCode != ti.expected {
++               t.Errorf("unexpected status code: %v != %v %d %s", rsp.StatusCode, ti.expected, n, buf)
++               return
++           }
++       })
++   }
++}
+
+

Using a debugger

+

Skipper supports plugins, and to offer this support it uses the plugin library. Due to a bug in the Go compiler, as reported here, a debugger cannot be used. This issue will be fixed in Go 1.12, but until then the only workaround is to remove references to the plugin library. The following patch can be used for debugging.

+
diff --git a/plugins.go b/plugins.go
+index 837b6cf..aa69f09 100644
+--- a/plugins.go
++++ b/plugins.go
+@@ -1,5 +1,6 @@
+ package skipper
+
++/*
+ import (
+    "fmt"
+    "os"
+@@ -13,8 +14,13 @@ import (
+    "github.com/zalando/skipper/filters"
+    "github.com/zalando/skipper/routing"
+ )
++*/
+
+ func (o *Options) findAndLoadPlugins() error {
++   return nil
++}
++
++/*
+    found := make(map[string]string)
+    done := make(map[string][]string)
+
+@@ -366,3 +372,4 @@ func readPluginConfig(plugin string) (conf []string, err error) {
+    }
+    return conf, nil
+ }
++*/
+
+

The patch can be applied with the git apply $PATCH_FILE command. Please do not commit the modified plugins.go along with your changes.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/reference/egress/index.html b/reference/egress/index.html new file mode 100644 index 0000000000..8cd0a88ccf --- /dev/null +++ b/reference/egress/index.html @@ -0,0 +1,1483 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Egress - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Egress Proxy

+

Disclaimer: Egress features are probably not feature +complete. Please create Github Issues +to show your ideas about this topic.

+

The picture below shows an authentication use case with Bearer token +injection, to show the egress traffic flow: +egress traffic flow

+

Skipper has some features, which are egress specific. Some features, +for example dropRequestHeader or +ratelimit, might also be used, but are not +listed here:

+ +

Secrets Module

+

Disclaimer: the specified features might be changed to make use cases work in the future.

+

Automated Secrets rotation

+

Secrets are read from files. Files can be rewritten by third party +tools to integrate whatever provider you want. +In Kubernetes you can write +Secrets +with an API and read them using a rotated, mounted files from skipper +for example.

+

To specify files or directories to find secrets, you can use +-credentials-paths command line flag. Filenames are used to define +the name of the secret, which will be used as a lookup key.

+

The files need to be created before skipper is started; as of today, skipper doesn't find new files automatically. This might change in the future.

+

To change the default update interval, which defaults to 10m, you +can use the -credentials-update-interval command line flag.

+

Example bearer injection

+

Create file /tmp/secrets/mytoken, that contains mytoken:

+
mkdir /tmp/secrets; echo mytoken >/tmp/secrets/mytoken
+
+

start fake service

+
nc -l 8080
+
+

start skipper proxy

+
skipper -inline-routes='Host("host1") -> bearerinjector("/tmp/secrets/mytoken") -> "http://127.0.0.1:8080/"' -credentials-paths=/tmp/secrets -credentials-update-interval=10s
+..
+[APP]INFO[0004] Updated secret file: /tmp/secrets/mytoken
+..
+
+

Client calls skipper proxy

+
% curl -H"Host: host1" localhost:9090/foo
+^C
+
+

fake service shows

+
GET /foo HTTP/1.1
+Host: 127.0.0.1:8080
+User-Agent: curl/7.49.0
+Accept: */*
+Authorization: Bearer mytoken
+Accept-Encoding: gzip
+
+

Change the secret: echo changedtoken >/tmp/secrets/mytoken. +Wait until skipper logs: [APP]INFO[0010] update secret file: /tmp/secrets/mytoken

+

Restart fake service (CTRL-c to stop)

+
nc -l 8080
+
+

Client calls skipper proxy retry:

+
% curl -H"Host: host1" localhost:9090/foo
+^C
+
+

fake service shows

+
GET /foo HTTP/1.1
+Host: 127.0.0.1:8080
+User-Agent: curl/7.49.0
+Accept: */*
+Authorization: Bearer changedtoken
+Accept-Encoding: gzip
+
+

This example showed bearer injection with secrets rotation.

+
Reach multiple services
+

Often your service wants to reach multiple services, so you need to differentiate these routes somehow.

+

For example your service needs to access a.example.com and +b.example.com.

+

One example is to use .localhost domain, so a.localhost and +b.localhost in your application and in skipper routes you would +have:

+
a: Host("a.localhost") -> bearerinjector("/tmp/secrets/mytoken") -> "https://a.example.com"
+b: Host("b.localhost") -> bearerinjector("/tmp/secrets/mytoken") -> "https://b.example.com"
+
+

You can also use host aliases, in Linux /etc/hosts, or in Kubernetes +hostAliases:

+

Pod spec:

+
spec:
+  hostAliases:
+  - ip: 127.0.0.1
+    hostnames:
+    - a.local
+    - b.local
+
+

Future - TODOs

+

We want to experiment in how to best use skipper as egress proxy. One +idea is to implement forward proxy via HTTP CONNECT +and being able to use the routing to inject the right Authorization headers with the +bearerinjector filter, for example.

+

If you have ideas please add your thoughts in +one of the issues, +that match your idea or create a new one.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/reference/filters/index.html b/reference/filters/index.html new file mode 100644 index 0000000000..a5cce7c405 --- /dev/null +++ b/reference/filters/index.html @@ -0,0 +1,7612 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Filters - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Skipper Filters

+

The parameters can be strings, regex or float64 / int

+
    +
  • string is a string surrounded by double quotes (")
  • +
  • regex is a regular expression, surrounded by /, e.g. /^www\.example\.org(:\d+)?$/
  • +
  • int / float64 are usual (decimal) numbers like 401 or 1.23456
  • +
  • time is a string in double quotes, parseable by time.Duration
  • +
+

Filters are a generic tool and can change HTTP header and body in the request and response path. +Filter can be chained using the arrow operator ->.

+

Example route with a match all, 2 filters and a backend:

+
all: * -> filter1 -> filter2 -> "http://127.0.0.1:1234/";
+
+

Template placeholders

+

Several filters support template placeholders (${var}) in string parameters.

+

Template placeholder is replaced by the value that is looked up in the following sources:

+
    +
  • request method (${request.method})
  • +
  • request host (${request.host})
  • +
  • request url path (${request.path})
  • +
  • request url rawquery (${request.rawQuery}, encoded request URL query without ?, e.g. q=foo&r=bar)
  • +
  • request url query (if starts with request.query. prefix, e.g ${request.query.q} is replaced by q query parameter value)
  • +
  • request headers (if starts with request.header. prefix, e.g ${request.header.Content-Type} is replaced by Content-Type request header value)
  • +
  • request cookies (if starts with request.cookie. prefix, e.g ${request.cookie.PHPSESSID} is replaced by PHPSESSID request cookie value)
  • +
  • request IP address
      +
    • ${request.source} - first IP address from X-Forwarded-For header or request remote IP address if header is absent, similar to Source predicate
    • +
    • ${request.sourceFromLast} - last IP address from X-Forwarded-For header or request remote IP address if header is absent, similar to SourceFromLast predicate
    • +
    • ${request.clientIP} - request remote IP address similar to ClientIP predicate
    • +
    +
  • +
  • response headers (if starts with response.header. prefix, e.g ${response.header.Location} is replaced by Location response header value)
  • +
  • filter context path parameters (e.g. ${id} is replaced by id path parameter value)
  • +
+

Missing value interpretation depends on the filter.

+

Example route that rewrites path using template placeholder:

+
u1: Path("/user/:id") -> setPath("/v2/user/${id}") -> <loopback>;
+
+

Example route that creates header from query parameter: +

r: Path("/redirect") && QueryParam("to") -> status(303) -> setResponseHeader("Location", "${request.query.to}") -> <shunt>;
+

+

status

+

Sets the response status code to the given value, with no regards to the backend response.

+

Parameters:

+
    +
  • status code (int)
  • +
+

Example:

+
route1: Host(/^all401\.example\.org$/) -> status(401) -> <shunt>;
+
+

comment

+

No operation, only to comment the route or a group of filters of a route

+

Parameters:

+
    +
  • msg (string)
  • +
+

Example:

+
route1: *
+    -> comment("nothing to see")
+    -> <shunt>;
+
+

annotate

+

Annotate the route; subsequent annotations using the same key will overwrite the value. Other subsequent filters can use annotations to make decisions and should document the key and value they use.

+

Parameters:

+
    +
  • key (string)
  • +
  • value (string)
  • +
+

Example:

+
route1: *
+    -> annotate("never", "gonna give you up")
+    -> annotate("never", "gonna let you down")
+    -> <shunt>;
+
+

HTTP Headers

+

preserveHost

+

Sets the incoming Host: header on the outgoing backend connection.

+

It can be used to override the proxyPreserveHost behavior for individual routes.

+

Parameters: “true” or “false”

+
    +
  • “true” - use the Host header from the incoming request
  • +
  • “false” - use the host from the backend address
  • +
+

Example: +

route1: * -> preserveHost("true") -> "http://backend.example.org";
+

+

modRequestHeader

+

Replace all matched regex expressions in the given header.

+

Parameters:

+
    +
  • header name (string)
  • +
  • the expression to match (regex)
  • +
  • the replacement (string)
  • +
+

Example:

+
enforce_www: * -> modRequestHeader("Host", "^zalando\.(\w+)$", "www.zalando.$1") -> redirectTo(301);
+
+

setRequestHeader

+

Set headers for requests.

+

Header value may contain template placeholders. +If a template placeholder can’t be resolved then filter does not set the header.

+

Parameters:

+
    +
  • header name (string)
  • +
  • header value (string)
  • +
+

Examples:

+
foo: * -> setRequestHeader("X-Passed-Skipper", "true") -> "https://backend.example.org";
+
+
// Ratelimit per resource
+Path("/resource/:id") -> setRequestHeader("X-Resource-Id", "${id}") -> clusterClientRatelimit("resource", 10, "1m", "X-Resource-Id") -> "https://backend.example.org";
+
+

appendRequestHeader

+

Same as setRequestHeader, +but appends the provided value to the already existing ones.

+

dropRequestHeader

+

Removes a header from the request

+

Parameters:

+
    +
  • header name (string)
  • +
+

Example:

+
foo: * -> dropRequestHeader("User-Agent") -> "https://backend.example.org";
+
+

modResponseHeader

+

Same as modRequestHeader, only for responses

+

Parameters:

+
    +
  • header name (string)
  • +
  • the expression to match (regex)
  • +
  • the replacement (string)
  • +
+

Example:

+
do_not_avoid_caching: * -> modResponseHeader("cache-control", "no-cache", "cache") -> "https://zalando.de";
+
+

setResponseHeader

+

Same as setRequestHeader, only for responses

+

Example:

+
set_cookie_with_path_param:
+  Path("/path/:id") && Method("GET")
+  -> setResponseHeader("Set-Cookie", "cid=${id}; Max-Age=36000; Secure")
+  -> redirectTo(302, "/")
+  -> <shunt>
+
+

appendResponseHeader

+

Same as appendRequestHeader, only for responses

+

dropResponseHeader

+

Same as dropRequestHeader but for responses from the backend

+

setContextRequestHeader

+

Set headers for requests using values from the filter context (state bag). If the +provided key (second parameter) cannot be found in the state bag, then it doesn’t +set the header.

+

Parameters:

+
    +
  • header name (string)
  • +
  • key in the state bag (string)
  • +
+

The route in the following example checks whether the request is authorized with the +oauthTokeninfoAllScope() filter. This filter stores the authenticated user with “auth-user” +key in the context, and the setContextRequestHeader() filter in the next step stores it in +the header of the outgoing request with the X-Uid name:

+
foo: * -> oauthTokeninfoAllScope("address_service.all") -> setContextRequestHeader("X-Uid", "auth-user") -> "https://backend.example.org";
+
+

appendContextRequestHeader

+

Same as setContextRequestHeader, +but appends the provided value to the already existing ones.

+

setContextResponseHeader

+

Same as setContextRequestHeader, except for responses.

+

appendContextResponseHeader

+

Same as appendContextRequestHeader, except for responses.

+

copyRequestHeader

+

Copies value of a given request header to another header.

+

Parameters:

+
    +
  • source header name (string)
  • +
  • target header name (string)
  • +
+

Example:

+
foo: * -> copyRequestHeader("X-Foo", "X-Bar") -> "https://backend.example.org";
+
+

copyResponseHeader

+

Same as copyRequestHeader, except for responses.

+

corsOrigin

+

The filter accepts an optional variadic list of acceptable origin +parameters. If the input argument list is empty, the header will +always be set to * which means any origin is acceptable. Otherwise +the header is only set if the request contains an Origin header and +its value matches one of the elements in the input list. The header is +only set on the response.

+

Parameters:

+
    +
  • url (variadic string)
  • +
+

Examples:

+
corsOrigin()
+corsOrigin("https://www.example.org")
+corsOrigin("https://www.example.org", "http://localhost:9001")
+
+

headerToQuery

+

Filter which assigns the value of a given header from the incoming Request to a given query param

+

Parameters:

+
    +
  • The name of the header to pick from request
  • +
  • The name of the query param key to add to request
  • +
+

Examples:

+
headerToQuery("X-Foo-Header", "foo-query-param")
+
+

The above filter will set foo-query-param query param respectively to the X-Foo-Header header +and will override the value if the queryparam exists already

+

flowId

+

Sets an X-Flow-Id header, if it’s not already in the request. +This allows you to have a trace in your logs, that traces from +the incoming request on the edge to all backend services.

+

Flow IDs must be in a certain format to be reusable in skipper. Valid formats +depend on the generator used in skipper. Default generator creates IDs of +length 16 matching the following regex: ^[0-9a-zA-Z+-]+$

+

Parameters:

+
    +
  • no parameter: resets always the X-Flow-Id header to a new value
  • +
  • "reuse": only create X-Flow-Id header if not already set or if the value is invalid in the request
  • +
+

Example:

+
* -> flowId() -> "https://some-backend.example.org";
+* -> flowId("reuse") -> "https://some-backend.example.org";
+
+

xforward

+

Standard proxy headers. Appends the client remote IP to the X-Forwarded-For and sets the X-Forwarded-Host +header.

+

xforwardFirst

+

Same as xforward, but instead of appending the last remote IP, it prepends it to comply with the +approach of certain LB implementations.

+

HTTP Path

+

modPath

+

Replace all matched regex expressions in the path.

+

Parameters:

+
    +
  • the expression to match (regex)
  • +
  • the replacement (string)
  • +
+

Example:

+
rm_api: Path("/api") -> modPath("/api", "/") -> "https://backend.example.org";
+append_bar: Path("/foo") -> modPath("/foo", "/foo/bar") -> "https://backend.example.org";
+new_base: PathSubtree("/base") -> modPath("/base", "/new/base") -> "https://backend.example.org";
+rm_api_regex: Path("/api") -> modPath("^/api/(.*)/v2$", "/$1") -> "https://backend.example.org";
+
+

setPath

+

Replace the path of the original request to the replacement.

+

Parameters:

+
    +
  • the replacement (string)
  • +
+

The replacement may contain template placeholders. +If a template placeholder can’t be resolved then empty value is used for it.

+

HTTP Redirect

+

redirectTo

+

Creates an HTTP redirect response.

+

Parameters:

+
    +
  • redirect status code (int)
  • +
  • location (string) - optional
  • +
+

Example:

+
redirect1: PathRegexp(/^\/foo\/bar/) -> redirectTo(302, "/foo/newBar") -> <shunt>;
+redirect2: * -> redirectTo(301) -> <shunt>;
+
+
    +
  • Route redirect1 will do a redirect with status code 302 to https + with new path /foo/newBar for requests, that match the path /foo/bar.
  • +
  • Route redirect2 will do a https redirect with status code 301 for all + incoming requests that match no other route
  • +
+

see also redirect-handling

+

redirectToLower

+

Same as redirectTo, but replaces all strings to lower case.

+

HTTP Query

+

stripQuery

+

Removes the query parameter from the request URL, and if the first filter +parameter is "true", preserves the query parameter in the form of +x-query-param-<queryParamName>: <queryParamValue> headers, so that ?foo=bar +becomes x-query-param-foo: bar

+

Example: +

* -> stripQuery() -> "http://backend.example.org";
+* -> stripQuery("true") -> "http://backend.example.org";
+

+

setQuery

+

Set the query string ?k=v in the request to the backend to a given value.

+

Parameters:

+
    +
  • key (string)
  • +
  • value (string)
  • +
+

Key and value may contain template placeholders. +If a template placeholder can’t be resolved then empty value is used for it.

+

Example:

+
setQuery("k", "v")
+
+

dropQuery

+

Delete the query string ?k=v in the request to the backend for a +given key.

+

Parameters:

+
    +
  • key (string)
  • +
+

Key may contain template placeholders. +If a template placeholder can’t be resolved then empty value is used for it.

+

Example:

+
dropQuery("k")
+
+

queryToHeader

+

Filter which assigns the value of a given query param from the +incoming Request to a given Header with optional format string value.

+

Parameters:

+
    +
  • The name of the query param key to pick from request
  • +
  • The name of the header to add to request
  • +
  • The format string used to create the header value, which gets the + value from the query value as before
  • +
+

Examples:

+
queryToHeader("foo-query-param", "X-Foo-Header")
+queryToHeader("access_token", "Authorization", "Bearer %s")
+
+

The first filter will set X-Foo-Header header respectively to the foo-query-param query param +and will not override the value if the header exists already.

+

The second filter will set Authorization header to the +access_token query param with a prefix value Bearer and will +not override the value if the header exists already.

+

TLS

+

Filters that provide access to TLS data of a request.

+

tlsPassClientCertificates

+

This filter copies TLS client certificates encoded as pem into the +X-Forwarded-Tls-Client-Cert header. Multiple certificates are +separated by ,.

+

Example:

+
* -> tlsPassClientCertificates() -> "http://10.2.5.21:8080";
+
+

Diagnostics

+

These filters are meant for diagnostic or load testing purposes.

+

randomContent

+

Generate response with random text of specified length.

+

Parameters:

+
    +
  • length of data (int)
  • +
+

Example:

+
* -> randomContent(42) -> <shunt>;
+
+

repeatContent

+

Generate response of specified size from repeated text.

+

Parameters:

+
    +
  • text to repeat (string)
  • +
  • size of response in bytes (int)
  • +
+

Example:

+
* -> repeatContent("I will not waste chalk. ", 1000) -> <shunt>;
+
+

repeatContentHex

+

Generate response of specified size from repeated bytes.

+

Parameters:

+
    +
  • bytes to repeat (hexadecimal string)
  • +
  • size of response in bytes (int)
  • +
+

Example:

+
* -> repeatContentHex("00", 100) -> <shunt>
+
+
// Create binary response using size equal to the number of bytes to repeat, i.e. repeat once
+* -> repeatContentHex("68657861646563696d616c", 11) -> <shunt>
+
+

wrapContent

+

Add prefix and suffix to the response.

+

Parameters:

+
    +
  • prefix (string)
  • +
  • suffix (string)
  • +
+

Examples:

+
* -> wrapContent("foo", "baz") -> inlineContent("bar") -> <shunt>
+
+
// JSON array of 100 zeros
+* -> wrapContent("[", "0]") -> repeatContent("0, ", 297) -> <shunt>
+
+

wrapContentHex

+

Add prefix and suffix to the response.

+

Parameters:

+
    +
  • prefix (hexadecimal string)
  • +
  • suffix (hexadecimal string)
  • +
+

Examples:

+
* -> wrapContentHex("68657861", "6d616c") -> inlineContent("deci") -> <shunt>
+
+
// 1G of gzip-compressed text
+*
+-> setResponseHeader("Content-Encoding", "gzip")
+-> wrapContentHex(
+  "1f8b08000000000004ffecd6b10d00200804c05598c5b80a852d0ee422762ce61c2657d212f8bf9915bb6f9f8c51b9c26c1feec13fc80379a80ff4210ff0000ff0000ff0000ff0000ff0000ff0000ff0000ff0000ff0000ff0000ff0000ffce781070000ffffecd6810c000000c0207feb737c8ba2f8cd6f7ef39bdffce637bf",
+  "7dc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077ce0ff81000000ffffecd6810000000080207feb418ea278ce739ef39ce73ce739cf7de0f581000000ffff010000ffff5216994600ca9a3b"
+)
+-> repeatContentHex("7dc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077ce0ff81000000ffffecd6810c000000c0207feb737c8ba278ce739ef39ce73ce739cf", 8300624)
+-> <shunt>
+
+

You may use https://github.com/AlexanderYastrebov/unrepeat to decompose binary file into prefix, repeating content and suffix.

+

latency

+

Enable adding artificial latency.

+

Parameters:

+
    +
  • latency in milliseconds (int) or in time as a string in double quotes, parseable by time.Duration)
  • +
+

Example:

+
* -> latency(120) -> "https://www.example.org";
+* -> latency("120ms") -> "https://www.example.org";
+
+

bandwidth

+

Enable bandwidth throttling.

+

Parameters:

+
    +
  • bandwidth in kb/s (int)
  • +
+

Example:

+
* -> bandwidth(30) -> "https://www.example.org";
+
+

chunks

+

Enables adding chunking responses with custom chunk size with +artificial delays in between response chunks. To disable delays, set +the second parameter to “0”.

+

Parameters:

+
    +
  • byte length (int)
  • +
  • time duration (time.Duration)
  • +
+

Example:

+
* -> chunks(1024, "120ms") -> "https://www.example.org";
+* -> chunks(1024, "0") -> "https://www.example.org";
+
+

backendLatency

+

Same as latency filter, but on the request path and not on +the response path.

+

backendBandwidth

+

Same as bandwidth filter, but on the request path and not on +the response path.

+

backendChunks

+

Same as chunks filter, but on the request path and not on +the response path.

+

tarpit

+

The tarpit filter discards the request and responds with a never ending +stream of chunked response payloads. The goal is to consume the client +connection without letting the client know what is happening.

+

Parameters:

+
    +
  • time duration (time.Duration)
  • +
+

Example:

+
* -> tarpit("1s") -> <shunt>;
+
+

The example will send every second a chunk of response payload.

+

absorb

+

The absorb filter reads and discards the payload of the incoming requests. +It logs with INFO level and a unique ID per request:

+
    +
  • the event of receiving the request
  • +
  • partial and final events for consuming request payload and total consumed byte count
  • +
  • the finishing event of the request
  • +
  • any read errors other than EOF
  • +
+

absorbSilent

+

The absorbSilent filter reads and discards the payload of the incoming requests. It only +logs read errors other than EOF.

+

uniformRequestLatency

+

The uniformRequestLatency filter introduces uniformly distributed +jitter latency within [mean-delta, mean+delta] interval for +requests. The first parameter is the mean and the second is delta. In +the example we would sleep for 100ms+/-10ms.

+

Example:

+
* -> uniformRequestLatency("100ms", "10ms") -> "https://www.example.org";
+
+

normalRequestLatency

+

The normalRequestLatency filter introduces normally distributed jitter +latency with configured mean value for requests. The first parameter +is µ (mean) and the second is σ as in +https://en.wikipedia.org/wiki/Normal_distribution.

+

Example:

+
* -> normalRequestLatency("10ms", "5ms") -> "https://www.example.org";
+
+

histogramRequestLatency

+

The histogramRequestLatency adds latency to requests according to the histogram distribution. +It expects a list of interleaved duration strings and numbers that defines a histogram. +Duration strings define boundaries of consecutive buckets and numbers define bucket weights. +The filter randomly selects a bucket with probability equal to its weight divided by the sum of all bucket weights +(which must be non-zero) and then sleeps for a random duration in between bucket boundaries.

+

Example:

+
r: * -> histogramRequestLatency("0ms", 50, "5ms", 0, "10ms", 30, "15ms", 20, "20ms") -> "https://www.example.org";
+
+

The example above adds a latency +* between 0ms and 5ms to 50% of the requests +* between 5ms and 10ms to 0% of the requests +* between 10ms and 15ms to 30% of the requests +* and between 15ms and 20ms to 20% of the requests.

+

uniformResponseLatency

+

The uniformResponseLatency filter introduces uniformly distributed +jitter latency within [mean-delta, mean+delta] interval for +responses. The first parameter is the mean and the second is delta. In +the example we would sleep for 100ms+/-10ms.

+

Example:

+
* -> uniformResponseLatency("100ms", "10ms") -> "https://www.example.org";
+
+

normalResponseLatency

+

The normalResponseLatency filter introduces normally distributed +jitter latency with configured mean value for responses. The first +parameter is µ (mean) and the second is σ as in +https://en.wikipedia.org/wiki/Normal_distribution.

+

Example:

+
* -> normalResponseLatency("10ms", "5ms") -> "https://www.example.org";
+
+

histogramResponseLatency

+

The histogramResponseLatency adds latency to responses according to the histogram distribution, similar to histogramRequestLatency.

+

logHeader

+

The logHeader filter prints the request line and the header, but not the body, to +stderr. Note that this filter should be used only in diagnostics setup and with care, +since the request headers may contain sensitive data, and they also can explode the +amount of logs. Authorization headers will be truncated in request and +response header logs. You can log request or response headers, which +defaults for backwards compatibility to request headers.

+

Parameters:

+
    +
  • no arg, similar to: “request”
  • +
  • “request” or “response” (string varargs)
  • +
+

Example:

+
* -> logHeader() -> "https://www.example.org";
+* -> logHeader("request") -> "https://www.example.org";
+* -> logHeader("response") -> "https://www.example.org";
+* -> logHeader("request", "response") -> "https://www.example.org";
+
+

logBody

+

The logBody filter logs the request or response body in chunks while +streaming. Chunks start with logBody("request") $flowid: or +logBody("response") $flowid:, such that you can find all chunks +belonging to a given flow. See also flowId() filter.

+

Note that this filter should be used only in diagnostics setup and +with care, since the request and response body may contain sensitive +data. Logs can also explode in the amount of bytes, so you have to +choose a limit. You can log request or response bodies. This filter +has close to no overhead other than the I/O created by the logger.

+

Parameters:

+
    +
  • type: “request” or “response” (string)
  • +
  • limit: maximum number of bytes to log (int)
  • +
+

Example:

+
* -> logBody("request", 1024) -> "https://www.example.org";
+* -> logBody("response", 1024) -> "https://www.example.org";
+* -> logBody("request", 1024) -> logBody("response", 1024) -> "https://www.example.org";
+
+

Timeout

+

backendTimeout

+

Configure backend timeout. Skipper responds with 504 Gateway Timeout status if obtaining a connection, +sending the request, and reading the backend response headers and body takes longer than the configured timeout. +However, if response streaming has already started it will be terminated, i.e. client will receive backend response +status and truncated response body.

+

Parameters:

+ +

Example:

+
* -> backendTimeout("10ms") -> "https://www.example.org";
+
+

readTimeout

+

Configure read timeout will set a read deadline on the server socket +connected to the client connecting to the proxy. Skipper will log 499 +client timeout with context canceled. We are not able to differentiate +between client hang up and read timeout.

+

Parameters:

+ +

Example:

+
* -> readTimeout("10ms") -> "https://www.example.org";
+
+

writeTimeout

+

Configure write timeout will set a write deadline on the server socket +connected to the client connecting to the proxy. Skipper will show +access logs as if the response was served as expected, but the client can +show an error. You can observe an increase in streaming errors via +metrics or a in opentracing proxy span you can see Tag +streamBody.byte with value streamBody error or in debug logs +something like +error while copying the response stream: write tcp 127.0.0.1:9090->127.0.0.1:38574: i/o timeout.

+

Parameters:

+ +

Example:

+
* -> writeTimeout("10ms") -> "https://www.example.org";
+
+

Shadow Traffic

+

tee

+

Provides a unix-like tee feature for routing.

+

Using this filter, the request will be sent to a “shadow” backend in addition +to the main backend of the route.

+

Example:

+
* -> tee("https://audit-logging.example.org") -> "https://foo.example.org";
+
+

This will send an identical request for foo.example.org to +audit-logging.example.org. Another use case could be using it for benchmarking +a new backend with some real traffic. This we call “shadow traffic”.

+

The above route will forward the request to https://foo.example.org as it +normally would do, but in addition to that, it will send an identical request to +https://audit-logging.example.org. The request sent to +https://audit-logging.example.org will receive the same method and headers, +and a copy of the body stream. The tee response is ignored for this shadow backend.

+

It is possible to change the path of the tee request, in a similar way to the +modPath filter:

+
Path("/api/v1") -> tee("https://api.example.org", "^/v1", "/v2" ) -> "http://api.example.org";
+
+

In the above example, one can test how a new version of an API would behave on +incoming requests.

+

teenf

+

The same as tee filter, but does not follow redirects from the backend.

+

teeLoopback

+

This filter provides a unix-like tee feature for routing, but unlike the tee, +this filter feeds the copied request to the start of the routing, including the +route lookup and executing the filters on the matched route.

+

It is recommended to use this solution instead of the tee filter, because the same +routing facilities are used for the outgoing tee requests as for the normal +requests, and all the filters and backend types are supported.

+

To ensure that the right route, or one of the right set of routes, is matched +after the loopback, use the filter together with the Tee +predicate, however, this is not mandatory if the request is changed via other +filters, such that other predicates ensure matching the right route. To avoid +infinite looping, the number of requests spawn from a single incoming request +is limited similarly as in case of the +loopback backend.

+

Parameters:

+
    +
  • tee group (string): a label identifying which routes should match the loopback + request, marked with the Tee predicate
  • +
+

Example, generate shadow traffic from 10% of the production traffic:

+
main: * -> "https://main-backend.example.org";
+split: Traffic(.1) -> teeLoopback("test-A") -> "https://main-backend.example.org";
+shadow: Tee("test-A") && True() -> "https://test-backend.example.org";
+
+

See also:

+ +

HTTP Body

+

compress

+

The filter, when executed on the response path, checks if the response entity can +be compressed. To decide, it checks the Content-Encoding, the Cache-Control and +the Content-Type headers. It doesn’t compress the content if the Content-Encoding +is set to other than identity, or the Cache-Control applies the no-transform +pragma, or the Content-Type is set to an unsupported value.

+

The default supported content types are: text/plain, text/html, application/json, +application/javascript, application/x-javascript, text/javascript, text/css, +image/svg+xml, application/octet-stream.

+

The default set of MIME types can be reset or extended by passing in the desired +types as filter arguments. When extending the defaults, the first argument needs to +be "...". E.g. to compress tiff in addition to the defaults:

+
* -> compress("...", "image/tiff") -> "https://www.example.org"
+
+

To reset the supported types, e.g. to compress only HTML, the “…” argument needs +to be omitted:

+
* -> compress("text/html") -> "https://www.example.org"
+
+

It is possible to control the compression level, by setting it as the first filter +argument, in front of the MIME types. The default compression level is best-speed. +The possible values are integers between 0 and 11 (inclusive), where 0 means +no-compression, 1 means best-speed and 11 means best-compression. Example:

+
* -> compress(11, "image/tiff") -> "https://www.example.org"
+
+

The filter also checks the incoming request, if it accepts the supported encodings, +explicitly stated in the Accept-Encoding header. +The filter currently supports by default gzip, deflate and br (can be overridden with flag compress-encodings). +It does not assume that the client accepts any encoding if the +Accept-Encoding header is not set. It ignores * in the Accept-Encoding header.

+

Supported encodings are prioritized on: +- quality value provided by client +- compress-encodings flag following order as provided if quality value is equal +- gzip, deflate, br in this order otherwise

+

When compressing the response, it updates the response header. It deletes the +Content-Length value triggering the proxy to always return the response with chunked +transfer encoding, sets the Content-Encoding to the selected encoding and sets the +Vary: Accept-Encoding header, if missing.

+

The compression happens in a streaming way, using only a small internal buffer.

+

decompress

+

The filter, when executed on the response path, checks if the response entity is +compressed by a supported algorithm (gzip, deflate, br). To decide, it checks the Content-Encoding +header.

+

When compressing the response, it updates the response header. It deletes the +Content-Length value triggering the proxy to always return the response with chunked +transfer encoding, deletes the Content-Encoding and the Vary headers, if set.

+

The decompression happens in a streaming way, using only a small internal buffer.

+

Example:

+
* -> decompress() -> "https://www.example.org"
+
+

static

+

Serves static content from the filesystem.

+

Parameters:

+
    +
  • Request path to strip (string)
  • +
  • Target base path in the filesystem (string)
  • +
+

Example:

+

This serves files from /srv/www/dehydrated when requested via /.well-known/acme-challenge/, +e.g. the request GET /.well-known/acme-challenge/foo will serve the file /srv/www/dehydrated/foo. +

acme: Host(/./) && Method("GET") && Path("/.well-known/acme-challenge/*")
+    -> static("/.well-known/acme-challenge/", "/srv/www/dehydrated") -> <shunt>;
+

+

Notes:

+
    +
  • redirects to the directory when a file index.html exists and it is requested, i.e. GET /foo/index.html redirects to /foo/ which serves then the /foo/index.html
  • +
  • serves the content of the index.html when a directory is requested
  • +
  • does a simple directory listing of files / directories when no index.html is present
  • +
+

inlineContent

+

Returns arbitrary content in the HTTP body.

+

Parameters:

+
    +
  • content (string)
  • +
  • content type (string) - optional
  • +
+

Example:

+
* -> inlineContent("<h1>Hello</h1>") -> <shunt>
+* -> inlineContent("[1,2,3]", "application/json") -> <shunt>
+* -> status(418) -> inlineContent("Would you like a cup of tea?") -> <shunt>
+
+

Content type will be automatically detected when not provided using https://mimesniff.spec.whatwg.org/#rules-for-identifying-an-unknown-mime-type algorithm. +Note that content detection algorithm does not contain any rules for recognizing JSON.

+
+

Note

+

inlineContent filter sets the response on request path and starts the response path immediately. +The rest of the filter chain and backend are ignored and therefore inlineContent filter must be the last in the chain.

+
+

inlineContentIfStatus

+

Returns arbitrary content in the HTTP body, if the response has the specified status code.

+

Parameters:

+
    +
  • status code (int)
  • +
  • content (string)
  • +
  • content type (string) - optional
  • +
+

Example:

+
* -> inlineContentIfStatus(404, "<p class=\"problem\">We don't have what you're looking for.</p>") -> "https://www.example.org"
+* -> inlineContentIfStatus(401, "{\"error\": \"unauthorized\"}", "application/json") -> "https://www.example.org"
+
+

The content type will be automatically detected when not provided.

+

blockContent

+

Block a request based on its body content.

+

The filter max buffer size is 2MiB by default and can be overridden with -max-matcher-buffer-size=<int>.

+

Parameters:

+
    +
  • toblockList (List of strings)
  • +
+

Example:

+
* -> blockContent("Malicious Content") -> "http://example.com";
+
+

blockContentHex

+

Block a request based on its body content.

+

The filter max buffer size is 2MiB by default and can be overridden with -max-matcher-buffer-size=<int>.

+

Parameters:

+
    +
  • toblockList (List of hex string)
  • +
+

Example:

+
* -> blockContentHex(`000a`) -> "http://example.com";
+* -> blockContentHex("deadbeef", "000a") -> "http://example.com";
+
+

sed

+

The filter sed replaces all occurrences of a pattern with a replacement string +in the response body.

+

Example:

+
editorRoute: * -> sed("foo", "bar") -> "https://www.example.org";
+
+

Example with larger max buffer:

+
editorRoute: * -> sed("foo", "bar", 64000000) -> "https://www.example.org";
+
+

This filter expects a regexp pattern and a replacement string as arguments. +During the streaming of the response body, every occurrence of the pattern will +be replaced with the replacement string. The editing doesn’t happen right when +the filter is executed, only later when the streaming normally happens, after +all response filters were called.

+

The sed() filter accepts two optional arguments, the max editor buffer size in +bytes, and max buffer handling flag. The max buffer size, when set, defines how +much data can be buffered at a given time by the editor. The default value is +2MiB. The max buffer handling flag can take one of two values: “abort” or +“best-effort” (default). Setting “abort” means that the stream will be aborted +when reached the limit. Setting “best-effort”, will run the replacement on the +available content, in case of certain patterns, this may result in content that +is different from one that would have been edited in a single piece. See more +details below.

+

The filter uses the go regular expression implementation: +https://github.com/google/re2/wiki/Syntax . Due to the streaming nature, matches +with zero length are ignored.

+

Memory handling and limitations

+

In order to avoid unbound buffering of unprocessed data, the sed* filters need to +apply some limitations. Some patterns, e.g. .* would allow to match the complete +payload, and it could result in trying to buffer it all and potentially causing +running out of available memory. Similarly, in case of certain expressions, when +they don’t match, it’s impossible to tell if they would match without reading more +data from the source, and so would potentially need to buffer the entire payload.

+

To prevent too high memory usage, the max buffer size is limited in case of each +variant of the filter, by default to 2MiB, which is the same limit as the one we +apply when reading the request headers by default. When the limit is reached, and +the buffered content matches the pattern, then it is processed by replacing it, +when it doesn’t match the pattern, then it is forwarded unchanged. This way, e.g. +sed(".*", "") can be used safely to consume and discard the payload.

+

As a result of this, with large payloads, it is possible that the resulting content +will be different than if we had run the replacement on the entire content at once. +If we have enough preliminary knowledge about the payload, then it may be better to +use the delimited variant of the filters, e.g. for line based editing.

+

If the max buffer handling is set to “abort”, then the stream editing is stopped +and the rest of the payload is dropped.

+

sedDelim

+

Like sed(), but it expects an additional argument, before the optional max buffer +size argument, that is used to delimit chunks to be processed at once. The pattern +replacement is executed only within the boundaries of the chunks defined by the +delimiter, and matches across the chunk boundaries are not considered.

+

Example:

+
editorRoute: * -> sedDelim("foo", "bar", "\n") -> "https://www.example.org";
+
+

sedRequest

+

Like sed(), but for the request content.

+

Example:

+
editorRoute: * -> sedRequest("foo", "bar") -> "https://www.example.org";
+
+

sedRequestDelim

+

Like sedDelim(), but for the request content.

+

Example:

+
editorRoute: * -> sedRequestDelim("foo", "bar", "\n") -> "https://www.example.org";
+
+

Authentication and Authorization

+

basicAuth

+

Enable Basic Authentication

+

The filter accepts two parameters, the first mandatory one is the path to the +htpasswd file usually used with Apache or nginx. The second one is the optional +realm name that will be displayed in the browser. MD5, SHA1 and BCrypt are supported +for Basic authentication password storage, see also +the http-auth module page.

+

Examples:

+
basicAuth("/path/to/htpasswd")
+basicAuth("/path/to/htpasswd", "My Website")
+
+

webhook

+

The webhook filter makes it possible to have your own authentication and +authorization endpoint as a filter.

+

Headers from the incoming request will be copied into the request that +is being done to the webhook endpoint. It is possible to copy headers +from the webhook response into the continuing request by specifying the +headers to copy as an optional second argument to the filter.

+

Responses from the webhook will be treated as follows:

+
    +
  • Authorized if the status code is less than 300
  • +
  • Forbidden if the status code is 403
  • +
  • Unauthorized for remaining status codes
  • +
+

Examples:

+
webhook("https://custom-webhook.example.org/auth")
+webhook("https://custom-webhook.example.org/auth", "X-Copy-Webhook-Header,X-Copy-Another-Header")
+
+

The webhook timeout has a default of 2 seconds and can be globally +changed, if skipper is started with -webhook-timeout=2s flag.

+

Tokeninfo

+

Tokeninfo handled by another service. +The filters just validate the response from the tokeninfo +service to do authorization as defined in the filter.

+

oauthTokeninfoAnyScope

+

If skipper is started with -oauth2-tokeninfo-url flag, you can use +this filter.

+

The filter accepts variable number of string arguments, which are used to +validate the incoming token from the Authorization: Bearer <token> +header. There are two rejection scenarios for this filter. If the token +is not successfully validated by the oauth server, then a 401 Unauthorised +response will be returned. However, if the token is successfully validated +but the required scope match isn’t satisfied, then a 403 Forbidden response +will be returned. If any of the configured scopes from the filter is found +inside the tokeninfo result for the incoming token, it will allow the +request to pass.

+

Examples:

+
oauthTokeninfoAnyScope("s1", "s2", "s3")
+
+

oauthTokeninfoAllScope

+

If skipper is started with -oauth2-tokeninfo-url flag, you can use +this filter.

+

The filter accepts variable number of string arguments, which are used to +validate the incoming token from the Authorization: Bearer <token> +header. There are two rejection scenarios for this filter. If the token +is not successfully validated by the oauth server, then a 401 Unauthorised +response will be returned. However, if the token is successfully validated +but the required scope match isn’t satisfied, then a 403 Forbidden response +will be returned. If all of the configured scopes from the filter are found +inside the tokeninfo result for the incoming token, it will allow the +request to pass.

+

Examples:

+
oauthTokeninfoAllScope("s1", "s2", "s3")
+
+

oauthTokeninfoAnyKV

+

If skipper is started with -oauth2-tokeninfo-url flag, you can use +this filter.

+

The filter accepts an even number of variable arguments of type +string, which are used to validate the incoming token from the +Authorization: Bearer <token> header. There are two rejection scenarios +for this filter. If the token is not successfully validated by the oauth +server, then a 401 Unauthorised response will be returned. However, +if the token is successfully validated but the required scope match +isn’t satisfied, then a 403 Forbidden response will be returned. +If any of the configured key value pairs from the filter is found +inside the tokeninfo result for the incoming token, it will allow +the request to pass.

+

Examples:

+
oauthTokeninfoAnyKV("k1", "v1", "k2", "v2")
+oauthTokeninfoAnyKV("k1", "v1", "k1", "v2")
+
+

oauthTokeninfoAllKV

+

If skipper is started with -oauth2-tokeninfo-url flag, you can use +this filter.

+

The filter accepts an even number of variable arguments of type +string, which are used to validate the incoming token from the +Authorization: Bearer <token> header. There are two rejection +scenarios for this filter. If the token is not successfully validated +by the oauth server, then a 401 Unauthorised response will be +returned. However, if the token is successfully validated but +the required scope match isn’t satisfied, then a 403 Forbidden response +will be returned. If all of the configured key value pairs from +the filter are found inside the tokeninfo result for the incoming +token, it will allow the request to pass.

+

Examples:

+
oauthTokeninfoAllKV("k1", "v1", "k2", "v2")
+
+

Tokenintrospection

+

Tokenintrospection handled by another service. +The filters just validate the response from the tokenintrospection +service to do authorization as defined in the filter.

+

oauthTokenintrospectionAnyClaims

+

The filter accepts variable number of string arguments, which are used +to validate the incoming token from the Authorization: Bearer +<token> header. The first argument to the filter is the issuer URL, +for example https://accounts.google.com, that will be used as +described in RFC Draft +to find the configuration and for example supported claims.

+

If one of the configured and supported claims from the filter are +found inside the tokenintrospection (RFC7662) result for the incoming +token, it will allow the request to pass.

+

Examples:

+
oauthTokenintrospectionAnyClaims("https://accounts.google.com", "c1", "c2", "c3")
+
+

oauthTokenintrospectionAllClaims

+

The filter accepts variable number of string arguments, which are used +to validate the incoming token from the Authorization: Bearer +<token> header. The first argument to the filter is the issuer URL, +for example https://accounts.google.com, that will be used as +described in RFC Draft +to find the configuration and for example supported claims.

+

If all of the configured and supported claims from the filter are +found inside the tokenintrospection (RFC7662) result for the incoming +token, it will allow the request to pass.

+

Examples:

+
oauthTokenintrospectionAllClaims("https://accounts.google.com", "c1", "c2", "c3")
+
+

oauthTokenintrospectionAnyKV

+

The filter accepts an even number of variable arguments of type +string, which are used to validate the incoming token from the +Authorization: Bearer <token> header. The first argument to the +filter is the issuer URL, for example https://accounts.google.com, +that will be used as described in +RFC Draft +to find the configuration and for example supported claims.

+

If one of the configured key value pairs from the filter are found +inside the tokenintrospection (RFC7662) result for the incoming token, +it will allow the request to pass.

+

Examples:

+
oauthTokenintrospectionAnyKV("https://accounts.google.com", "k1", "v1", "k2", "v2")
+oauthTokenintrospectionAnyKV("https://accounts.google.com", "k1", "v1", "k1", "v2")
+
+

oauthTokenintrospectionAllKV

+

The filter accepts an even number of variable arguments of type +string, which are used to validate the incoming token from the +Authorization: Bearer <token> header. The first argument to the +filter is the issuer URL, for example https://accounts.google.com, +that will be used as described in +RFC Draft +to find the configuration and for example supported claims.

+

If all of the configured key value pairs from the filter are found +inside the tokenintrospection (RFC7662) result for the incoming token, +it will allow the request to pass.

+

Examples:

+
oauthTokenintrospectionAllKV("https://accounts.google.com", "k1", "v1", "k2", "v2")
+
+

secureOauthTokenintrospectionAnyClaims

+

The filter accepts variable number of string arguments, which are used +to validate the incoming token from the Authorization: Bearer +<token> header. The first argument to the filter is the issuer URL, +for example https://accounts.google.com, that will be used as +described in RFC Draft +to find the configuration and for example supported claims.

+

Second and third arguments are the client-id and client-secret. +Use this filter if the Token Introspection endpoint requires authorization to validate and decode the incoming token. +The filter will optionally read client-id and client-secret from environment variables: OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET

+

If one of the configured and supported claims from the filter are +found inside the tokenintrospection (RFC7662) result for the incoming +token, it will allow the request to pass.

+

Examples:

+
secureOauthTokenintrospectionAnyClaims("issuerURL", "client-id", "client-secret", "claim1", "claim2")
+
+

Read client-id and client-secret from environment variables +

secureOauthTokenintrospectionAnyClaims("issuerURL", "", "", "claim1", "claim2")
+

+

secureOauthTokenintrospectionAllClaims

+

The filter accepts variable number of string arguments, which are used +to validate the incoming token from the Authorization: Bearer +<token> header. The first argument to the filter is the issuer URL, +for example https://accounts.google.com, that will be used as +described in RFC Draft +to find the configuration and for example supported claims.

+

Second and third arguments are the client-id and client-secret. +Use this filter if the Token Introspection endpoint requires authorization to validate and decode the incoming token. +The filter will optionally read client-id and client-secret from environment variables: OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET

+

If all of the configured and supported claims from the filter are +found inside the tokenintrospection (RFC7662) result for the incoming +token, it will allow the request to pass.

+

Examples:

+
secureOauthTokenintrospectionAllClaims("issuerURL", "client-id", "client-secret", "claim1", "claim2")
+
+

Read client-id and client-secret from environment variables +

secureOauthTokenintrospectionAllClaims("issuerURL", "", "", "claim1", "claim2")
+

+

secureOauthTokenintrospectionAnyKV

+

The filter accepts an even number of variable arguments of type +string, which are used to validate the incoming token from the +Authorization: Bearer <token> header. The first argument to the +filter is the issuer URL, for example https://accounts.google.com, +that will be used as described in +RFC Draft +to find the configuration and for example supported claims.

+

Second and third arguments are the client-id and client-secret. +Use this filter if the Token Introspection endpoint requires authorization to validate and decode the incoming token. +The filter will optionally read client-id and client-secret from environment variables: OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET

+

If one of the configured key value pairs from the filter are found +inside the tokenintrospection (RFC7662) result for the incoming token, +it will allow the request to pass.

+

Examples:

+
secureOauthTokenintrospectionAnyKV("issuerURL", "client-id", "client-secret", "k1", "v1", "k2", "v2")
+
+

Read client-id and client-secret from environment variables +

secureOauthTokenintrospectionAnyKV("issuerURL", "", "", "k1", "v1", "k2", "v2")
+

+

secureOauthTokenintrospectionAllKV

+

The filter accepts an even number of variable arguments of type +string, which are used to validate the incoming token from the +Authorization: Bearer <token> header. The first argument to the +filter is the issuer URL, for example https://accounts.google.com, +that will be used as described in +RFC Draft +to find the configuration and for example supported claims.

+

Second and third arguments are the client-id and client-secret. +Use this filter if the Token Introspection endpoint requires authorization to validate and decode the incoming token. +The filter will optionally read client-id and client-secret from environment variables: OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET

+

If all of the configured key value pairs from the filter are found +inside the tokenintrospection (RFC7662) result for the incoming token, +it will allow the request to pass.

+

Examples:

+
secureOauthTokenintrospectionAllKV("issuerURL", "client-id", "client-secret", "k1", "v1", "k2", "v2")
+
+

Read client-id and client-secret from environment variables +

secureOauthTokenintrospectionAllKV("issuerURL", "", "", "k1", "v1", "k2", "v2")
+

+

JWT

+

jwtValidation

+

The filter parses bearer jwt token from Authorization header and validates the signature using public keys +discovered via /.well-known/openid-configuration endpoint. Takes issuer url as single parameter. +The filter stores token claims into the state bag where they can be used by oidcClaimsQuery() or forwardTokenPart()

+

Examples:

+
jwtValidation("https://login.microsoftonline.com/{tenantId}/v2.0")
+
+

jwtMetrics

+
+

This filter is experimental and may change in the future, please see tests for example usage.

+
+

The filter parses (but does not validate) JWT token from Authorization request header on response path +and increments the following counters:

+
    +
  • missing-token: request does not have Authorization header
  • +
  • invalid-token-type: Authorization header value is not a Bearer type
  • +
  • invalid-token: Authorization header does not contain a JWT token
  • +
  • missing-issuer: JWT token does not have iss claim
  • +
  • invalid-issuer: JWT token does not have any of the configured issuers
  • +
+

Each counter name uses concatenation of request method, escaped hostname and response status as a prefix, e.g.:

+
jwtMetrics.custom.GET.example_org.200.invalid-token
+
+

and therefore requires approximately count(HTTP methods) * count(Hosts) * count(Statuses) * 8 bytes of additional memory.

+

The filter does nothing if response status is 4xx or route is opt-out via annotation or state bag value.

+

The filter requires single string argument that is parsed as YAML. +For convenience use flow style format.

+

Examples:

+
jwtMetrics("{issuers: ['https://example.com', 'https://example.org']}")
+
+// opt-out by annotation
+annotate("oauth.disabled", "this endpoint is public") ->
+jwtMetrics("{issuers: ['https://example.com', 'https://example.org'], optOutAnnotations: [oauth.disabled]}")
+
+// opt-out by state bag:
+// oauthTokeninfo* and oauthGrant filters store token info in the state bag using "tokeninfo" key.
+oauthTokeninfoAnyKV("foo", "bar") ->
+jwtMetrics("{issuers: ['https://example.com', 'https://example.org'], optOutStateBag: [tokeninfo]}")
+
+

Forward Token Data

+

forwardToken

+

The filter takes the header name as its first argument and sets header value to the +token info or token introspection result serialized as a JSON object. +To include only particular fields provide their names as additional arguments.

+

If this filter is used when there is no token introspection or token info data +then it does not have any effect.

+

Examples:

+
forwardToken("X-Tokeninfo-Forward")
+forwardToken("X-Tokeninfo-Forward", "access_token", "token_type")
+
+

forwardTokenField

+

The filter takes a header name and a field as its first and second arguments. The corresponding field from the result of token info, token introspection or oidc user info is added as +corresponding header when the request is passed to the backend.

+

If this filter is used when there is no token introspection, token info or oidc user info data +then it does not have any effect.

+

To forward multiple fields filters can be sequenced

+

Examples:

+
forwardTokenField("X-Tokeninfo-Forward-Oid", "oid") -> forwardTokenField("X-Tokeninfo-Forward-Sub", "sub")
+
+

OAuth2

+

oauthGrant

+

Enables authentication and authorization with an OAuth2 authorization code grant flow as +specified by RFC 6749 Section 1.3.1. +Automatically redirects unauthenticated users to log in at their provider’s authorization +endpoint. Supports token refreshing and stores access and refresh tokens in an encrypted +cookie. Supports credential rotation for the OAuth2 client ID and secret.

+

The filter consumes and drops the grant token request cookie to prevent it from leaking +to untrusted downstream services.

+

The filter will inject the OAuth2 bearer token into the request headers if the flag +oauth2-access-token-header-name is set.

+

The filter must be used in conjunction with the grantCallback filter +where the OAuth2 provider can redirect authenticated users with an authorization code. +Skipper will make sure to add the grantCallback filter for you to your routes when +you pass the -enable-oauth2-grant-flow flag.

+

The filter may be used with the grantClaimsQuery filter to perform +authz and access control.

+

The filter also supports javascript login redirect stub that can be used e.g. to store location hash. +To enable the stub, add preceding annotate filter with oauthGrant.loginRedirectStub key and +HTML content that will be served to the client instead of 307 Temporary Redirect to the authorization URL. +The filter will replace {{authCodeURL}} placeholder in the content with the actual authorization URL.

+

See the tutorial for step-by-step +instructions.

+

Examples:

+
all:
+    *
+    -> oauthGrant()
+    -> "http://localhost:9090";
+
+
single_page_app:
+    *
+    -> annotate("oauthGrant.loginRedirectStub", `
+          <!doctype html>
+          <html lang="en">
+            <head>
+              <title>Redirecting...</title>
+              <script>
+                if (window.location.hash !== null) {
+                  localStorage.setItem('original-location-hash', window.location.hash);
+                }
+                window.location.replace('{{authCodeURL}}');
+              </script>
+            </head>
+          </html>
+    `)
+    -> oauthGrant()
+    -> "http://localhost:9090";
+
+

Skipper arguments:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ArgumentRequired?Description
-enable-oauth2-grant-flowyestoggle flag to enable the oauthGrant() filter. Must be set if you use the filter in routes. Example: -enable-oauth2-grant-flow
-oauth2-auth-urlyesURL of the OAuth2 provider’s authorize endpoint. Example: -oauth2-auth-url=https://identity.example.com/oauth2/authorize
-oauth2-token-urlyesURL of the OAuth2 provider’s token endpoint. Example: -oauth2-token-url=https://identity.example.com/oauth2/token
-oauth2-tokeninfo-urlyesURL of the OAuth2 provider’s tokeninfo endpoint. Example: -oauth2-tokeninfo-url=https://identity.example.com/oauth2/tokeninfo
-oauth2-secret-fileyespath to the file containing the secret for encrypting and decrypting the grant token cookie (the secret can be anything). Example: -oauth2-secret-file=/path/to/secret
-oauth2-client-id-fileconditionalpath to the file containing the OAuth2 client ID. Required if you have not set -oauth2-client-id. Example: -oauth2-client-id-file=/path/to/client_id
-oauth2-client-secret-fileconditionalpath to the file containing the OAuth2 client secret. Required if you have not set -oauth2-client-secret. Example: -oauth2-client-secret-file=/path/to/client_secret
-oauth2-client-idconditionalOAuth2 client ID for authenticating with your OAuth2 provider. Required if you have not set -oauth2-client-id-file. Example: -oauth2-client-id=myclientid
-oauth2-client-secretconditionalOAuth2 client secret for authenticating with your OAuth2 provider. Required if you have not set -oauth2-client-secret-file. Example: -oauth2-client-secret=myclientsecret
-credentials-update-intervalnothe time interval for updating client id and client secret from files. Example: -credentials-update-interval=30s
-oauth2-access-token-header-namenothe name of the request header where the user’s bearer token should be set. Example: -oauth2-access-token-header-name=X-Grant-Authorization
-oauth2-grant-tokeninfo-keysnocomma separated list of keys to preserve in OAuth2 Grant Flow tokeninfo. Default: empty, preserves all tokeninfo keys. Example: -oauth2-grant-tokeninfo-keys=scope,realm,expires_in
-oauth2-auth-url-parametersnoany additional URL query parameters to set for the OAuth2 provider’s authorize and token endpoint calls. Example: -oauth2-auth-url-parameters=key1=foo,key2=bar
-oauth2-callback-pathnopath of the Skipper route containing the grantCallback() filter for accepting an authorization code and using it to get an access token. Example: -oauth2-callback-path=/oauth/callback
-oauth2-token-cookie-namenothe name of the cookie where the access tokens should be stored in encrypted form. Default: oauth-grant. Example: -oauth2-token-cookie-name=SESSION
-oauth2-token-cookie-remove-subdomainsnothe number of subdomains to remove from the callback request hostname to obtain token cookie domain. Default: 1. Example: -oauth2-token-cookie-remove-subdomains=0
-oauth2-grant-insecurenoomits Secure attribute of the token cookie and uses http scheme for callback url. Default: false
+

grantCallback

+

The filter accepts authorization codes as a result of an OAuth2 authorization code grant +flow triggered by oauthGrant. It uses the code to request access and +refresh tokens from the OAuth2 provider’s token endpoint.

+

Examples:

+
// The callback route is automatically added when the `-enable-oauth2-grant-flow`
+// flag is passed. You do not need to register it yourself. This is the equivalent
+// of the route that Skipper adds for you:
+callback:
+    Path("/.well-known/oauth2-callback")
+    -> grantCallback()
+    -> <shunt>;
+
+

Skipper arguments:

+ + + + + + + + + + + + + + + +
ArgumentRequired?Description
-oauth2-callback-pathnopath of the Skipper route containing the grantCallback() filter. Example: -oauth2-callback-path=/oauth/callback
+

grantLogout

+

The filter revokes the refresh and access tokens in the cookie set by +oauthGrant if -oauth2-revoke-token-url is configured. +It also deletes the cookie by setting the Set-Cookie response header +to an empty value after a successful token revocation.

+

Examples:

+
grantLogout()
+
+

Skipper arguments:

+ + + + + + + + + + + + + + + +
ArgumentRequired?Description
-oauth2-revoke-token-urlnoURL of the OAuth2 provider’s token revocation endpoint. Example: -oauth2-revoke-token-url=https://identity.example.com/oauth2/revoke
+

grantClaimsQuery

+

The filter allows defining access control rules based on claims in a tokeninfo JSON +payload.

+

This filter is an alias for oidcClaimsQuery and functions identically to it. +See oidcClaimsQuery for more information.

+

Examples:

+
oauthGrant() -> grantClaimsQuery("/path:@_:sub%\"userid\"")
+oauthGrant() -> grantClaimsQuery("/path:scope.#[==\"email\"]")
+
+

Skipper arguments:

+ + + + + + + + + + + + + + + +
ArgumentRequired?Description
-oauth2-tokeninfo-subject-keyyesthe key of the attribute containing the OAuth2 subject ID in the OAuth2 provider’s tokeninfo JSON payload. Default: uid. Example: -oauth2-tokeninfo-subject-key=sub
+

OpenID Connect

+

To enable OpenID Connect filters use -oidc-secrets-file command line flag.

+

oauthOidcUserInfo

+
oauthOidcUserInfo("https://oidc-provider.example.com", "client_id", "client_secret",
+    "http://target.example.com/subpath/callback", "email profile", "name email picture",
+    "parameter=value", "X-Auth-Authorization:claims.email", "0")
+
+

The filter needs the following parameters:

+
    +
  • OpenID Connect Provider URL For example Google OpenID Connect is available on https://accounts.google.com
  • +
  • Client ID This value is obtained from the provider upon registration of the application.
  • +
  • Client Secret Also obtained from the provider
  • +
  • Callback URL The entire path to the callback from the provider on which the token will be received. + It can be any value which is a subpath on which the filter is applied.
  • +
  • Scopes The OpenID scopes separated by spaces which need to be specified when requesting the token from the provider.
  • +
  • Claims The claims which should be present in the token returned by the provider.
  • +
  • Auth Code Options (optional) Passes key/value parameters to a provider’s authorization endpoint. The value can be dynamically set by a query parameter with the same key name if the placeholder skipper-request-query is used.
  • +
  • Upstream Headers (optional) The upstream endpoint will receive these headers which values are parsed from the OIDC information. The header definition can be one or more header-query pairs, space delimited. The query syntax is GJSON.
  • +
  • SubdomainsToRemove (optional, default “1”) Configures number of subdomains to remove from the request hostname to derive OIDC cookie domain. By default one subdomain is removed, e.g. for the www.example.com request hostname the OIDC cookie domain will be example.com (to support SSO for all subdomains of the example.com). Configure “0” to use the same hostname. Note that value is a string.
  • +
+

oauthOidcAnyClaims

+
oauthOidcAnyClaims("https://oidc-provider.example.com", "client_id", "client_secret",
+    "http://target.example.com/subpath/callback", "email profile", "name email picture",
+    "parameter=value", "X-Auth-Authorization:claims.email")
+
+

The filter needs the following parameters:

+
    +
  • OpenID Connect Provider URL For example Google OpenID Connect is available on https://accounts.google.com
  • +
  • Client ID This value is obtained from the provider upon registration of the application.
  • +
  • Client Secret Also obtained from the provider
  • +
  • Callback URL The entire path to the callback from the provider on which the token will be received. + It can be any value which is a subpath on which the filter is applied.
  • +
  • Scopes The OpenID scopes separated by spaces which need to be specified when requesting the token from the provider.
  • +
  • Claims Several claims can be specified and the request is allowed as long as at least one of them is present.
  • +
  • Auth Code Options (optional) Passes key/value parameters to a provider’s authorization endpoint. The value can be dynamically set by a query parameter with the same key name if the placeholder skipper-request-query is used.
  • +
  • Upstream Headers (optional) The upstream endpoint will receive these headers which values are parsed from the OIDC information. The header definition can be one or more header-query pairs, space delimited. The query syntax is GJSON.
  • +
  • SubdomainsToRemove (optional, default “1”) Configures number of subdomains to remove from the request hostname to derive OIDC cookie domain. By default one subdomain is removed, e.g. for the www.example.com request hostname the OIDC cookie domain will be example.com (to support SSO for all subdomains of the example.com). Configure “0” to use the same hostname. Note that value is a string.
  • +
+

oauthOidcAllClaims

+
oauthOidcAllClaims("https://oidc-provider.example.com", "client_id", "client_secret",
+    "http://target.example.com/subpath/callback", "email profile", "name email picture",
+    "parameter=value", "X-Auth-Authorization:claims.email")
+
+

The filter needs the following parameters:

+
    +
  • OpenID Connect Provider URL For example Google OpenID Connect is available on https://accounts.google.com
  • +
  • Client ID This value is obtained from the provider upon registration of the application.
  • +
  • Client Secret Also obtained from the provider
  • +
  • Callback URL The entire path to the callback from the provider on which the token will be received. + It can be any value which is a subpath on which the filter is applied.
  • +
  • Scopes The OpenID scopes separated by spaces which need to be specified when requesting the token from the provider.
  • +
  • Claims Several claims can be specified and the request is allowed only when all claims are present.
  • +
  • Auth Code Options (optional) Passes key/value parameters to a provider’s authorization endpoint. The value can be dynamically set by a query parameter with the same key name if the placeholder skipper-request-query is used.
  • +
  • Upstream Headers (optional) The upstream endpoint will receive these headers which values are parsed from the OIDC information. The header definition can be one or more header-query pairs, space delimited. The query syntax is GJSON.
  • +
  • SubdomainsToRemove (optional, default “1”) Configures number of subdomains to remove from the request hostname to derive OIDC cookie domain. By default one subdomain is removed, e.g. for the www.example.com request hostname the OIDC cookie domain will be example.com (to support SSO for all subdomains of the example.com). Configure “0” to use the same hostname. Note that value is a string.
  • +
+

oidcClaimsQuery

+
oidcClaimsQuery("<path>:[<query>]", ...)
+
+

The filter is chained after oauthOidc* authentication as it parses the ID token that has been saved in the internal StateBag for this request. It validates access control of the requested path against the defined query. +It accepts one or more arguments, that is a path prefix which is granted access to when the query definition evaluates positive. +It supports exact matches of keys, key-value pairs, introspecting of arrays or exact and wildcard matching of nested structures. +The query definition can be one or more queries per path, space delimited. The query syntax is GJSON with a convenience modifier of @_ which unfolds to [@this].#("+arg+")

+

Given following example ID token:

+
{
+  "email": "someone@example.org",
+  "groups": [
+    "CD-xyz",
+    "appX-Test-Users",
+    "Purchasing-Department"
+  ],
+  "name": "Some One"
+}
+
+

Access to path / would be granted to everyone in example.org, however path /login only to those who are members of the group "appX-Tester":

+
oauthOidcAnyClaims(...) -> oidcClaimsQuery("/login:groups.#[==\"appX-Tester\"]", "/:@_:email%\"*@example.org\"")
+
+

For above ID token following query definitions would also be positive:

+
oidcClaimsQuery("/:email")
+oidcClaimsQuery("/another/path:groups.#[%\"CD-*\"]")
+oidcClaimsQuery("/:name%\"*One\"", "/path:groups.#[%\"*-Test-Users\"] groups.#[==\"Purchasing-Department\"]")
+
+

As of now there is no negative/deny rule possible. The first matching path is evaluated against the defined query/queries and if positive, permitted.

+

Open Policy Agent

+

To get started with Open Policy Agent, also have a look at the tutorial. This section is only a reference for the implemented filters.

+

opaAuthorizeRequest

+

The canonical use case that is also implemented with Envoy External Authorization: Use the http request to evaluate if Skipper should deny the request (with customizable response) or let the request pass to the downstream service

+

Example:

+
opaAuthorizeRequest("my-app-id")
+
+

Example (passing context): +

opaAuthorizeRequest("my-app-id", "com.mydomain.xxx.myprop: myvalue")
+

+

Data Flows

+

The data flow in case the policy allows the request looks like this

+
             ┌──────────────────┐               ┌────────────────────┐
+ (1) Request │     Skipper      │ (4) Request   │ Target Application │
+─────────────┤                  ├──────────────►│                    │
+             │                  │               │                    │
+ (6) Response│   (2)│   ▲ (3)   │ (5) Response  │                    │
+◄────────────┤Req ->│   │ allow │◄──────────────┤                    │
+             │Input │   │       │               │                    │
+             ├──────┴───┴───────┤               └────────────────────┘
+             │Open Policy Agent │
+             │      │   │       │
+             │      │   │       │
+             │      │   │       │
+             │      ▼   │       │
+             │ ┌────────┴─────┐ │
+             │ │   Policy     │ │
+             │ └──────────────┘ │
+             │                  │
+             └──────────────────┘
+
+

In Step (2) the http request is transformed into an input object following the Envoy structure that is also used by the OPA Envoy plugin. In (3) the decision of the policy is evaluated. If it is equivalent to an “allow”, the remaining steps are executed as without the filter.

+

The data flow in case the policy disallows the request looks like this

+
             ┌──────────────────┐               ┌────────────────────┐
+ (1) Request │     Skipper      │               │ Target Application │
+─────────────┤                  ├──────────────►│                    │
+             │                  │               │                    │
+ (4) Response│   (2)│   ▲ (3)   │               │                    │
+◄────────────┤Req ->│   │ allow │◄──────────────┤                    │
+             │Input │   │ =false│               │                    │
+             ├──────┴───┴───────┤               └────────────────────┘
+             │Open Policy Agent │
+             │      │   │       │
+             │      │   │       │
+             │      │   │       │
+             │      ▼   │       │
+             │ ┌────────┴─────┐ │
+             │ │   Policy     │ │
+             │ └──────────────┘ │
+             │                  │
+             └──────────────────┘
+
+

The difference is that if the decision in (3) is equivalent to false, the response is handled directly from the filter. If the decision contains response body, status or headers those are used to build the response in (4), otherwise a 403 Forbidden with a generic body is returned.

+

Manipulating Request Headers

+

Headers both to the upstream and the downstream service can be manipulated the same way this works for Envoy external authorization

+

This allows both to add and remove unwanted headers in allow/deny cases.

+

opaAuthorizeRequestWithBody

+

Requests can also be authorized based on the request body the same way that is supported with the Open Policy Agent Envoy plugin, look for the input attribute parsed_body in the upstream documentation.

+

This filter has the same parameters that the opaAuthorizeRequest filter has.

+

A request’s body is parsed up to a maximum size with a default of 1MB that can be configured via the -open-policy-agent-max-request-body-size command line argument. To avoid OOM errors due to too many concurrent authorized body requests, another flag -open-policy-agent-max-memory-body-parsing controls how much memory can be used across all requests with a default of 100MB. If in-flight requests that use body authorization exceed that limit, incoming requests that use the body will be rejected with an internal server error. The number of concurrent requests is

+
\[ n_{max-memory-body-parsing} \over min(avg(n_{request-content-length}), n_{max-request-body-size}) \]
+

so if requests on average have 100KB and the maximum memory is set to 100MB, on average 1024 authorized requests can be processed concurrently.

+

The filter also honors the skip-request-body-parse of the corresponding configuration that the OPA plugin uses.

+

opaServeResponse

+

Always serves the response even if the policy allows the request and can customize the response completely. Can be used to re-implement legacy authorization services by already using data in Open Policy Agent but implementing an old REST API. This can also be useful to support Single Page Applications to return the calling users’ permissions.

+

Hint: As there is no real allow/deny in this case and the policy computes the http response, you typically will want to drop all decision logs

+

Example:

+
opaServeResponse("my-app-id")
+
+

Example (passing context): +

opaServeResponse("my-app-id", "com.mydomain.xxx.myprop: myvalue")
+

+

Data Flows

+

For this filter, the data flow looks like this independent of an allow/deny decision

+
             ┌──────────────────┐
+ (1) Request │     Skipper      │
+─────────────┤                  ├
+             │                  │
+ (4) Response│   (2)│   ▲ (3)   │
+◄────────────┤Req ->│   │ resp  │
+             │Input │   │       │
+             ├──────┴───┴───────┤
+             │Open Policy Agent │
+             │      │   │       │
+             │      │   │       │
+             │      │   │       │
+             │      ▼   │       │
+             │ ┌────────┴─────┐ │
+             │ │   Policy     │ │
+             │ └──────────────┘ │
+             │                  │
+             └──────────────────┘
+
+

opaServeResponseWithReqBody

+

If you want to serve requests directly from an Open Policy Agent policy that uses the request body, this can be done by using the input.parsed_body attribute the same way that is supported with the Open Policy Agent Envoy plugin.

+

This filter has the same parameters that the opaServeResponse filter has.

+

A request’s body is parsed up to a maximum size with a default of 1MB that can be configured via the -open-policy-agent-max-request-body-size command line argument. To avoid OOM errors due to too many concurrent authorized body requests, another flag -open-policy-agent-max-memory-body-parsing controls how much memory can be used across all requests with a default of 100MB. If in-flight requests that use body authorization exceed that limit, incoming requests that use the body will be rejected with an internal server error. The number of concurrent requests is

+
\[ n_{max-memory-body-parsing} \over min(avg(n_{request-content-length}), n_{max-request-body-size}) \]
+

so if requests on average have 100KB and the maximum memory is set to 100MB, on average 1024 authorized requests can be processed concurrently.

+

The filter also honors the skip-request-body-parse of the corresponding configuration that the OPA plugin uses.

+ +

dropRequestCookie

+

Deletes given cookie from the request header.

+

Parameters:

+
    +
  • cookie name (string)
  • +
+

Example:

+
dropRequestCookie("test-session")
+
+

dropResponseCookie

+

Deletes given cookie from the response header.

+

Parameters:

+
    +
  • cookie name (string)
  • +
+

Example:

+
dropResponseCookie("test-session")
+
+

requestCookie

+

Append a cookie to the request header.

+

Parameters:

+
    +
  • cookie name (string)
  • +
  • cookie value (string)
  • +
+

Example:

+
requestCookie("test-session", "abc")
+
+

responseCookie

+

Appends a cookie to the response via “Set-Cookie” header. +It derives cookie domain by removing one subdomain from the request hostname domain. +The filter accepts an optional argument to set the Max-Age attribute of the cookie, of type int, in seconds. +Use zero to expire the cookie immediately. +An optional fourth argument, “change-only”, controls if the cookie should be set on every +response, or only if the request does not contain a cookie with the provided +name and value.

+

Example:

+
responseCookie("test-session", "abc")
+responseCookie("test-session", "abc", 31536000),
+responseCookie("test-session", "abc", 31536000, "change-only")
+responseCookie("test-session", "deleted", 0),
+
+

jsCookie

+

The JS cookie behaves exactly as the response cookie, but it does not set the +HttpOnly directive, so these cookies will be accessible from JS code running +in web browsers.

+

Example:

+
jsCookie("test-session-info", "abc-debug", 31536000, "change-only")
+
+

Circuit Breakers

+

consecutiveBreaker

+

This breaker opens when the proxy could not connect to a backend or received +a >=500 status code at least N times in a row. When open, the proxy returns +503 - Service Unavailable response during the breaker timeout. After this timeout, +the breaker goes into half-open state, in which it expects that M number of +requests succeed. The requests in the half-open state are accepted concurrently. +If any of the requests during the half-open state fails, the breaker goes back to +open state. If all succeed, it goes to closed state again.

+

Parameters:

+
    +
  • number of consecutive failures to open (int)
  • +
  • timeout (time string, parseable by time.Duration) - optional
  • +
  • half-open requests (int) - optional
  • +
  • idle-ttl (time string, parseable by time.Duration) - optional
  • +
+

See also the circuit breaker docs.

+

Can be used as egress feature.

+

rateBreaker

+

The “rate breaker” works similar to the consecutiveBreaker, but +instead of considering N consecutive failures for going open, it maintains a sliding +window of the last M events, both successes and failures, and opens only when the number +of failures reaches N within the window. This way the sliding window is not time based +and allows the same breaker characteristics for low and high rate traffic.

+

Parameters:

+
    +
  • number of consecutive failures to open (int)
  • +
  • sliding window (int)
  • +
  • timeout (time string, parseable by time.Duration) - optional
  • +
  • half-open requests (int) - optional
  • +
  • idle-ttl (time string, parseable by time.Duration) - optional
  • +
+

See also the circuit breaker docs.

+

Can be used as egress feature.

+

disableBreaker

+

Change (or set) the breaker configurations for an individual route and disable for another, in eskip:

+
updates: Method("POST") && Host("foo.example.org")
+  -> consecutiveBreaker(9)
+  -> "https://foo.backend.net";
+
+backendHealthcheck: Path("/healthcheck")
+  -> disableBreaker()
+  -> "https://foo.backend.net";
+
+

See also the circuit breaker docs.

+

Can be used as egress feature.

+

Rate Limit

+

localRatelimit

+

DEPRECATED use clientRatelimit with the same + settings instead.

+

clientRatelimit

+

Per skipper instance calculated ratelimit, that allows number of +requests by client. The definition of the same client is based on data +of the http header and can be changed with an optional third +parameter. If the third parameter is set skipper will use the +defined HTTP header to put the request in the same client bucket, +else the X-Forwarded-For Header will be used. You need to run skipper +with command line flag -enable-ratelimits.

+

One filter consumes memory calculated by the following formula, where +N is the number of individual clients put into the same bucket, M the +maximum number of requests allowed:

+
memory = N * M * 15 byte
+
+

Memory usage examples:

+
    +
  • 5MB for M=3 and N=100000
  • +
  • 15MB for M=10 and N=100000
  • +
  • 150MB for M=100 and N=100000
  • +
+

Parameters:

+
    +
  • number of allowed requests per time period (int)
  • +
  • time period for requests being counted (time.Duration)
  • +
  • optional parameter to set the same client by header, in case the provided string contains ,, it will combine all these headers (string)
  • +
+
clientRatelimit(3, "1m")
+clientRatelimit(3, "1m", "Authorization")
+clientRatelimit(3, "1m", "X-Foo,Authorization,X-Bar")
+
+

See also the ratelimit docs.

+

ratelimit

+

Per skipper instance calculated ratelimit, that allows forwarding a +number of requests to the backend group. You need to run skipper with +command line flag -enable-ratelimits.

+

Parameters:

+
    +
  • number of allowed requests per time period (int)
  • +
  • time period for requests being counted (time.Duration)
  • +
  • response status code to use for a rate limited request - optional, default: 429
  • +
+
ratelimit(20, "1m")
+ratelimit(300, "1h")
+ratelimit(4000, "1m", 503)
+
+

See also the ratelimit docs.

+

clusterClientRatelimit

+

This ratelimit is calculated across all skipper peers and the same +rate limit group. The first parameter is a string to select the same +ratelimit group across one or more routes. +The rate limit group allows the given number of requests by client. +The client identity is derived from the value of the X-Forwarded-For header or client IP address +and can be changed with an optional fourth parameter. +The optional fourth parameter may specify comma-separated list of header names. +Skipper will join header values to obtain client identity. +If identity value is empty (i.e. when all header values are empty or missing) then ratelimit does not apply.

+

You need to run skipper with command line flags -enable-swarm and +-enable-ratelimits. See also our cluster ratelimit tutorial

+

Parameters:

+
    +
  • rate limit group (string)
  • +
  • number of allowed requests per time period (int)
  • +
  • time period for requests being counted (time.Duration)
  • +
  • optional parameter to set the same client by header, in case the provided string contains ,, it will combine all these headers (string)
  • +
+
clusterClientRatelimit("groupA", 10, "1h")
+clusterClientRatelimit("groupA", 10, "1h", "Authorization")
+clusterClientRatelimit("groupA", 10, "1h", "X-Forwarded-For,Authorization,User-Agent")
+
+

See also the ratelimit docs.

+

clusterRatelimit

+

This ratelimit is calculated across all skipper peers and the same +rate limit group. The first parameter is a string to select the same +ratelimit group across one or more routes. The rate limit group +allows the given number of requests to a backend.

+

You need to run skipper with command line flags -enable-swarm and +-enable-ratelimits. See also our cluster ratelimit tutorial

+

Parameters:

+
    +
  • rate limit group (string)
  • +
  • number of allowed requests per time period (int)
  • +
  • time period for requests being counted (time.Duration)
  • +
  • response status code to use for a rate limited request - optional, default: 429
  • +
+
clusterRatelimit("groupB", 20, "1m")
+clusterRatelimit("groupB", 300, "1h")
+clusterRatelimit("groupB", 4000, "1m", 503)
+
+

See also the ratelimit docs.

+

backendRatelimit

+

The filter configures request rate limit for each backend endpoint within rate limit group across all Skipper peers. +When limit is reached Skipper refuses to forward the request to the backend and +responds with 503 Service Unavailable status to the client, i.e. implements load shedding.

+

It is similar to clusterClientRatelimit filter but counts request rate +using backend endpoint address instead of incoming request IP address or a HTTP header. +Requires command line flags -enable-swarm and -enable-ratelimits.

+

Both rate limiting and load shedding can use the exact same mechanism to protect the backend but the key difference is the semantics:

+
    +
  • rate limiting should adopt 4XX and inform the client that they are exceeding some quota. It doesn’t depend on the current capacity of the backend.
  • +
  • load shedding should adopt 5XX and inform the client that the backend is not able to provide the service. It depends on the current capacity of the backend.
  • +
+

Parameters:

+
    +
  • rate limit group (string)
  • +
  • number of allowed requests per time period (int)
  • +
  • timeframe for requests being counted (time.Duration)
  • +
  • response status code to use for rejected requests - optional, default: 503
  • +
+

Multiple filter definitions using the same group must use the same number of allowed requests and timeframe values.

+

Examples:

+

foo: Path("/foo")
+  -> backendRatelimit("foobar", 100, "1s")
+  -> <"http://backend1", "http://backend2">;
+
+bar: Path("/bar")
+  -> backendRatelimit("foobar", 100, "1s")
+  -> <"http://backend1", "http://backend2">;
+
+Configures rate limit of 100 requests per second for each backend1 and backend2 +regardless of the request path by using the same group name, number of requests and timeframe parameters.

+

foo: Path("/foo")
+  -> backendRatelimit("foo", 40, "1s")
+  -> <"http://backend1", "http://backend2">;
+
+bar: Path("/bar")
+  -> backendRatelimit("bar", 80, "1s")
+  -> <"http://backend1", "http://backend2">;
+
+Configures rate limit of 40 requests per second for each backend1 and backend2 +for the /foo requests and 80 requests per second for the /bar requests by using different group name per path. +The total request rate each backend receives can not exceed 40+80=120 requests per second.

+

foo: Path("/baz")
+  -> backendRatelimit("baz", 100, "1s", 429)
+  -> <"http://backend1", "http://backend2">;
+
+Configures rate limit of 100 requests per second for each backend1 and backend2 and responds +with 429 Too Many Requests when limit is reached.

+

clusterLeakyBucketRatelimit

+

Implements leaky bucket rate limit algorithm that uses Redis as a storage. +Requires command line flags -enable-ratelimits, -enable-swarm and -swarm-redis-urls to be set.

+

The leaky bucket is an algorithm based on an analogy of how a bucket with a constant leak will overflow if either +the average rate at which water is poured in exceeds the rate at which the bucket leaks or if more water than +the capacity of the bucket is poured in all at once, see https://en.wikipedia.org/wiki/Leaky_bucket

+

Parameters:

+
    +
  • label (string)
  • +
  • leak rate volume (int)
  • +
  • leak rate period (time.Duration)
  • +
  • capacity (int)
  • +
  • increment (int)
  • +
+

The bucket label, leak rate (volume/period) and capacity uniquely identify the bucket.

+

Label supports template placeholders. +If a template placeholder can’t be resolved then the request is allowed and does not add to any bucket.

+

Leak rate (divided by increment) defines a maximum average allowed request rate. +The rate is configured by two parameters for convenience and consistency with other filters but is actually a single number, +e.g. the rate of 2 per second equals to the rate of 20 per 10 seconds or 120 per minute.

+

Capacity defines the maximum request burst size or an allowed jitter.

+

Each passing request adds increment amount to the bucket, different routes may add different amounts to the same bucket.

+

Configuration with equal capacity and increment allows no jitter: first request fills up the bucket full and +subsequent request will be rejected if it arrives earlier than emission interval = 1/leak rate.

+

Real requests always have a jitter which can be demonstrated by the configuration having capacity and increment of one: +

r1: * -> clusterLeakyBucketRatelimit("1rps", 1, "1s", 1, 1) -> status(200) -> <shunt>;
+
+it does not allow jitter and therefore rejects ~half of the requests coming at rate of 1 rps: +
$ echo "GET http://localhost:9090" | vegeta attack -rate=1/s -duration=1m | vegeta report
+Requests      [total, rate, throughput]  60, 1.02, 0.58
+Duration      [total, attack, wait]      59.001991855s, 59.000310522s, 1.681333ms
+Latencies     [mean, 50, 95, 99, max]    1.721207ms, 1.555227ms, 1.943115ms, 10.689486ms, 11.538278ms
+Bytes In      [total, mean]              0, 0.00
+Bytes Out     [total, mean]              0, 0.00
+Success       [ratio]                    56.67%
+Status Codes  [code:count]               200:34  429:26
+Error Set:
+429 Too Many Requests
+

+

On the other hand the configuration with capacity greater than increment: +

r1: * -> clusterLeakyBucketRatelimit("1rps2", 1, "1s", 2, 1) -> status(200) -> <shunt>;
+
+allows all requests: +
~$ echo "GET http://localhost:9090" | vegeta attack -rate=1/s -duration=1m | vegeta report
+Requests      [total, rate, throughput]  60, 1.02, 1.02
+Duration      [total, attack, wait]      59.00023518s, 58.999779118s, 456.062µs
+Latencies     [mean, 50, 95, 99, max]    1.410641ms, 1.585908ms, 1.859727ms, 8.285963ms, 8.997149ms
+Bytes In      [total, mean]              0, 0.00
+Bytes Out     [total, mean]              0, 0.00
+Success       [ratio]                    100.00%
+Status Codes  [code:count]               200:60
+Error Set:
+
+and even if rate is greater than 1 rps the average allowed request rate is still equal to the leak rate of 1 rps: +
$ echo "GET http://localhost:9090" | vegeta attack -rate=11/10s -duration=1m | vegeta report
+Requests      [total, rate, throughput]  66, 1.12, 1.03
+Duration      [total, attack, wait]      59.091880389s, 59.089985762s, 1.894627ms
+Latencies     [mean, 50, 95, 99, max]    1.709568ms, 1.60613ms, 1.925731ms, 10.601822ms, 12.10052ms
+Bytes In      [total, mean]              0, 0.00
+Bytes Out     [total, mean]              0, 0.00
+Success       [ratio]                    92.42%
+Status Codes  [code:count]               200:61  429:5
+Error Set:
+429 Too Many Requests
+

+

Therefore the capacity should be configured greater than increment unless strict request interval needs to be enforced. +Configuration having capacity below increment rejects all requests.

+

Examples: +

// allow each unique Authorization header once in five seconds
+clusterLeakyBucketRatelimit("auth-${request.header.Authorization}", 1, "5s", 2, 1)
+
+// allow 60 requests per hour (each subsequent request allowed not earlier than after 1h/60 = 1m) for all clients
+clusterLeakyBucketRatelimit("hourly", 60, "1h", 1, 1)
+
+// allow 10 requests per minute for each unique PHPSESSID cookie with bursts of up to 5 requests
+clusterLeakyBucketRatelimit("session-${request.cookie.PHPSESSID}", 10, "1m", 5, 1)
+
+// use the same bucket but add different amount (i.e. one /expensive request counts as two /cheap)
+Path("/cheap")     -> clusterLeakyBucketRatelimit("user-${request.cookie.Authorization}", 1, "1s", 5, 1) -> ...
+Path("/expensive") -> clusterLeakyBucketRatelimit("user-${request.cookie.Authorization}", 1, "1s", 5, 2) -> ...
+

+

ratelimitFailClosed

+

This filter changes the failure mode for all rate limit filters of the route. +By default rate limit filters fail open on infrastructure errors (e.g. when redis is down) and allow requests. +When this filter is present on the route, rate limit filters will fail closed in case of infrastructure errors and deny requests.

+

Examples: +

fail_open: * -> clusterRatelimit("g",10, "1s")
+fail_closed: * -> ratelimitFailClosed() -> clusterRatelimit("g", 10, "1s")
+

+

In case clusterRatelimit could not reach the swarm (e.g. redis):

+
    +
  • Route fail_open will allow the request
  • +
  • Route fail_closed will deny the request
  • +
+

Load Shedding

+

The basic idea of load shedding is to reduce errors by early stopping +some of the ingress requests that create too much load and serving +the maximum throughput the system can process at a point in time.

+

There is a great talk by Acacio Cruz from +Google +that explains the basic principles.

+

admissionControl

+

Implements an admission control filter, that rejects traffic by +observed error rate and probability. If it rejects a request skipper will +respond with status code 503.

+

The probability of rejection is calculated by the following equation:

+
\[ P_{reject} = ( { n_{total} - { n_{success} \over threshold } \over n_{total} + 1} )^{ exponent } \]
+

Examples:

+
admissionControl(metricSuffix, mode, d, windowSize, minRPS, successThreshold, maxRejectProbability, exponent)
+admissionControl("myapp", "active", "1s", 5, 10, 0.95, 0.9, 0.5)
+
+ +

Parameters:

+
    +
  • metric suffix (string)
  • +
  • mode (enum)
  • +
  • d (time.Duration)
  • +
  • window size (int)
  • +
  • minRps (int)
  • +
  • success threshold (float64)
  • +
  • max reject probability (float64)
  • +
  • exponent (float64)
  • +
+

Metric suffix is the chosen suffix key to expose reject counter, +should be unique by filter instance

+

Mode has 3 different possible values:

+
    +
  • “active” will reject traffic
  • +
  • “inactive” will never reject traffic
  • +
  • “logInactive” will not reject traffic, but log to debug filter settings
  • +
+

D the time duration of a single slot for required counters in our +circular buffer of window size.

+

Window size is the size of the circular buffer. It is used to snapshot +counters to calculate total requests and number of success. It is +within \([1, 100]\).

+

MinRps is the minimum requests per second that have to pass this filter +otherwise it will not reject traffic.

+

Success threshold sets the lowest request success rate at which the +filter will not reject requests. It is within \((0,1]\). A value of +0.95 means an error rate of lower than 5% will not trigger +rejects.

+

Max reject probability sets the upper bound of reject probability. It +is within (0,1]. A value of 0.95 means if backend errors with 100% it +will only reject up to 95%.

+

exponent is used to dictate the rejection probability. The +calculation is done by \(p = p^{exponent}\) +The exponent value is within \((0,\infty]\), to increase rejection +probability you have to use values lower than 1:

+
    +
  • 1: linear
  • +
  • 1/2: quadratic
  • +
  • 1/3: cubic
  • +
+

lua

+

See the scripts page

+

Logs

+

accessLogDisabled

+

Deprecated: use disableAccessLog or enableAccessLog

+

The accessLogDisabled filter overrides global Skipper AccessLogDisabled setting for a specific route, which allows to either turn-off +the access log for specific route while access log, in general, is enabled or vice versa.

+

Example:

+
accessLogDisabled("false")
+
+

disableAccessLog

+

Filter overrides global Skipper AccessLogDisabled setting and allows to turn-off the access log for specific route +while access log, in general, is enabled. It is also possible to disable access logs only for a subset of response codes +from backend by providing an optional list of response code prefixes.

+

Parameters:

+
    +
  • response code prefixes (variadic int) - optional
  • +
+

Example:

+
disableAccessLog()
+disableAccessLog(1, 301, 40)
+
+

This disables logs of all requests with status codes 1xxs, 301 and all 40xs.

+

enableAccessLog

+

Filter overrides global Skipper AccessLogDisabled setting and allows to turn-on the access log for specific route +while access log, in general, is disabled. It is also possible to enable access logs only for a subset of response codes +from backend by providing an optional list of response code prefixes.

+

Parameters:

+
    +
  • response code prefixes (variadic int) - optional
  • +
+

Example:

+
enableAccessLog()
+enableAccessLog(1, 301, 20)
+
+

This enables logs of all requests with status codes 1xxs, 301 and all 20xs.

+

auditLog

+

Filter auditLog() logs the request and N bytes of the body into the +log file. N defaults to 1024 and can be overridden with +-max-audit-body=<int>. N=0 omits logging the body.

+

Example:

+
auditLog()
+
+

unverifiedAuditLog

+

Filter unverifiedAuditLog() adds a Header, X-Unverified-Audit, to the request, the content of which, will also +be written to the log file. By default, the value of the audit header will be equal to the value of the sub key, from +the Authorization token. This can be changed by providing a string input to the filter which matches another key from the +token.

+

N.B. It is important to note that, if the content of the X-Unverified-Audit header does not match the following regex, then +a default value of invalid-sub will be populated in the header instead: + ^[a-zA-Z0-9_/:?=&%@.#-]*$

+

Examples:

+
unverifiedAuditLog()
+
+
unverifiedAuditLog("azp")
+
+

Backend

+

backendIsProxy

+

Notifies the proxy that the backend handling this request is also a +proxy. The proxy type is based in the URL scheme which can be either +http, https or socks5.

+

Keep in mind that Skipper currently cannot handle CONNECT requests +by tunneling the traffic to the target destination, however, the +CONNECT requests can be forwarded to a different proxy using this +filter.

+

Example:

+
foo1:
+  *
+  -> backendIsProxy()
+  -> "http://proxy.example.com";
+
+foo2:
+  *
+  -> backendIsProxy()
+  -> <roundRobin, "http://proxy1.example.com", "http://proxy2.example.com">;
+
+foo3:
+  *
+  -> setDynamicBackendUrl("http://proxy.example.com")
+  -> backendIsProxy()
+  -> <dynamic>;
+
+

setDynamicBackendHostFromHeader

+

Filter sets the backend host for a route, value is taken from the provided header. +Can be used only with <dynamic> backend. Meant to be used together with setDynamicBackendSchemeFromHeader +or setDynamicBackendScheme. If this filter chained together with setDynamicBackendUrlFromHeader +or setDynamicBackendUrl filters, the latter ones would have priority.

+

Parameters:

+
    +
  • header name (string)
  • +
+

Example:

+
foo: * -> setDynamicBackendHostFromHeader("X-Forwarded-Host") -> <dynamic>;
+
+

setDynamicBackendSchemeFromHeader

+

Filter sets the backend scheme for a route, value is taken from the provided header. +Can be used only with <dynamic> backend. Meant to be used together with setDynamicBackendHostFromHeader +or setDynamicBackendHost. If this filter chained together with +setDynamicBackendUrlFromHeader or setDynamicBackendUrl, the latter ones would have priority.

+

Parameters:

+
    +
  • header name (string)
  • +
+

Example:

+
foo: * -> setDynamicBackendSchemeFromHeader("X-Forwarded-Proto") -> <dynamic>;
+
+

setDynamicBackendUrlFromHeader

+

Filter sets the backend url for a route, value is taken from the provided header. +Can be used only with <dynamic> backend.

+

Parameters:

+
    +
  • header name (string)
  • +
+

Example:

+
foo: * -> setDynamicBackendUrlFromHeader("X-Custom-Url") -> <dynamic>;
+
+

setDynamicBackendHost

+

Filter sets the backend host for a route. Can be used only with <dynamic> backend. +Meant to be used together with setDynamicBackendSchemeFromHeader +or setDynamicBackendScheme. If this filter chained together with setDynamicBackendUrlFromHeader +or setDynamicBackendUrl, the latter ones would have priority.

+

Parameters:

+
    +
  • host (string)
  • +
+

Example:

+
foo: * -> setDynamicBackendHost("example.com") -> <dynamic>;
+
+

setDynamicBackendScheme

+

Filter sets the backend scheme for a route. Can be used only with <dynamic> backend. +Meant to be used together with setDynamicBackendHostFromHeader +or setDynamicBackendHost. If this filter chained together with +setDynamicBackendUrlFromHeader or setDynamicBackendUrl, the latter ones would have priority.

+

Parameters:

+
    +
  • scheme (string)
  • +
+

Example:

+
foo: * -> setDynamicBackendScheme("https") -> <dynamic>;
+
+

setDynamicBackendUrl

+

Filter sets the backend url for a route. Can be used only with <dynamic> backend.

+

Parameters:

+
    +
  • url (string)
  • +
+

Example:

+
foo: * -> setDynamicBackendUrl("https://example.com") -> <dynamic>;
+
+

apiUsageMonitoring

+

The apiUsageMonitoring filter adds API related metrics to the Skipper monitoring. It is by default not activated. Activate +it by providing the -enable-api-usage-monitoring flag at Skipper startup. In its deactivated state, it is still +registered as a valid filter (allowing route configurations to specify it), but will perform no operation. That allows, +per instance, production environments to use it and testing environments not to while keeping the same route configuration +for all environments.

+

For the client based metrics, additional flags need to be specified.

+ + + + + + + + + + + + + + + + + + + + + +
FlagDescription
api-usage-monitoring-realm-keysName of the property in the JWT JSON body that contains the name of the realm.
api-usage-monitoring-client-keysName of the property in the JWT JSON body that contains the name of the client.
api-usage-monitoring-realms-tracking-patternRegEx of realms to be monitored. Defaults to ‘services’.
+

NOTE: Make sure to activate the metrics flavour proper to your environment using the metrics-flavour +flag in order to get those metrics.

+

Example:

+
skipper -metrics-flavour prometheus -enable-api-usage-monitoring -api-usage-monitoring-realm-keys="realm" -api-usage-monitoring-client-keys="managed-id" -api-usage-monitoring-realms-tracking-pattern="services,users"
+
+

The structure of the metrics is all of those elements, separated by . dots:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PartDescription
apiUsageMonitoring.customEvery filter metrics starts with the name of the filter followed by custom. This part is constant.
Application IDIdentifier of the application, configured in the filter under app_id.
TagTag of the application (e.g. staging), configured in the filter under tag.
API IDIdentifier of the API, configured in the filter under api_id.
MethodThe request’s method (verb), capitalized (ex: GET, POST, PUT, DELETE).
PathThe request’s path, in the form of the path template configured in the filter under path_templates.
RealmThe realm in which the client is authenticated.
ClientIdentifier under which the client is authenticated.
Metric NameName (or key) of the metric being tracked.
+

Available Metrics

+ +

Those metrics are not identifying the realm and client. They always have * in their place.

+

Example:

+
                                                                                      + Realm
+                                                                                      |
+apiUsageMonitoring.custom.orders-backend.staging.orders-api.GET.foo/orders/{order-id}.*.*.http_count
+                                                                                        | |
+                                                                                        | + Metric Name
+                                                                                        + Client
+
+

The available metrics are:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TypeMetric NameDescription
Counterhttp_countnumber of HTTP exchanges
Counterhttp1xx_countnumber of HTTP exchanges resulting in information (HTTP status in the 100s)
Counterhttp2xx_countnumber of HTTP exchanges resulting in success (HTTP status in the 200s)
Counterhttp3xx_countnumber of HTTP exchanges resulting in a redirect (HTTP status in the 300s)
Counterhttp4xx_countnumber of HTTP exchanges resulting in a client error (HTTP status in the 400s)
Counterhttp5xx_countnumber of HTTP exchanges resulting in a server error (HTTP status in the 500s)
Histogramlatencytime between the first observable moment (a call to the filter’s Request) until the last (a call to the filter’s Response)
+ +

Those metrics are not identifying endpoint (path) and HTTP verb. They always have * in their place.

+

Example:

+
                                                            + HTTP Verb
+                                                            | + Path Template     + Metric Name
+                                                            | |                   |
+apiUsageMonitoring.custom.orders-backend.staging.orders-api.*.*.users.mmustermann.http_count
+                                                                |     |
+                                                                |     + Client
+                                                                + Realm
+
+

The available metrics are:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TypeMetric NameDescription
Counterhttp_countnumber of HTTP exchanges
Counterhttp1xx_countnumber of HTTP exchanges resulting in information (HTTP status in the 100s)
Counterhttp2xx_countnumber of HTTP exchanges resulting in success (HTTP status in the 200s)
Counterhttp3xx_countnumber of HTTP exchanges resulting in a redirect (HTTP status in the 300s)
Counterhttp4xx_countnumber of HTTP exchanges resulting in a client error (HTTP status in the 400s)
Counterhttp5xx_countnumber of HTTP exchanges resulting in a server error (HTTP status in the 500s)
Counterlatency_sumsum of seconds (in decimal form) between the first observable moment (a call to the filter’s Request) until the last (a call to the filter’s Response)
+

Filter Configuration

+

Endpoints can be monitored using the apiUsageMonitoring filter in the route. It accepts JSON objects (as strings) +of the format mentioned below. In case any of the required parameters is missing, no-op filter is created, i.e. no +metrics are captured, but the creation of the route does not fail.

+
api-usage-monitoring-configuration:
+  type: object
+  required:
+    - application_id
+    - api_id
+    - path_templates
+  properties:
+    application_id:
+      type: string
+      description: ID of the application
+      example: order-service
+    tag:
+      type: string
+      description: tag of the application
+      example: staging
+    api_id:
+      type: string
+      description: ID of the API
+      example: orders-api
+    path_templates:
+      description: Endpoints to be monitored.
+      type: array
+      minLength: 1
+      items:
+        type: string
+        description: >
+          Path template in /articles/{article-id} (OpenAPI 3) or in /articles/:article-id format.
+          NOTE: They will be normalized to the :this format for metrics naming.
+        example: /orders/{order-id}
+    client_tracking_pattern:
+        description: >
+            The pattern that matches client id in form of a regular expression.
+
+            By default (if undefined), it is set to `.*`.
+
+            An empty string disables the client metrics completely.
+        type: string
+        examples:
+            all_services:
+                summary: All services are tracked (for all activated realms).
+                value: ".*"
+            just_some_services:
+                summary: Only services `orders-service` and `shipment-service` are tracked.
+                value: "(orders\-service|shipment\-service)"
+
+

Configuration Example:

+
apiUsageMonitoring(`
+    {
+        "application_id": "my-app",
+        "tag": "staging",
+        "api_id": "orders-api",
+        "path_templates": [
+            "foo/orders",
+            "foo/orders/:order-id",
+            "foo/orders/:order-id/order_item/{order-item-id}"
+        ],
+        "client_tracking_pattern": "(shipping\-service|payment\-service)"
+    }`,`{
+        "application_id": "my-app",
+        "api_id": "customers-api",
+        "path_templates": [
+            "/foo/customers/",
+            "/foo/customers/{customer-id}/"
+        ]
+    }
+`)
+
+

Based on the previous configuration, here is an example of a counter metric.

+
apiUsageMonitoring.custom.my-app.staging.orders-api.GET.foo/orders/{order-id}.*.*.http_count
+
+

Note that a missing tag in the configuration will be replaced by {no-tag} in the metric:

+
apiUsageMonitoring.custom.my-app.{no-tag}.customers-api.GET.foo/customers.*.*.http_count
+
+

Here is the Prometheus query to obtain it.

+
sum(rate(skipper_custom_total{key="apiUsageMonitoring.custom.my-app.staging.orders-api.GET.foo/orders/{order-id}.*.*.http_count"}[60s])) by (key)
+
+

Here is an example of a histogram metric.

+
apiUsageMonitoring.custom.my_app.staging.orders-api.POST.foo/orders.latency
+
+

Here is the Prometheus query to obtain it.

+
histogram_quantile(0.5, sum(rate(skipper_custom_duration_seconds_bucket{key="apiUsageMonitoring.custom.my-app.staging.orders-api.POST.foo/orders.*.*.latency"}[60s])) by (le, key))
+
+

NOTE: Non configured paths will be tracked with {unknown} Application ID, Tag, API ID +and path template.

+

However, if all application_ids of your configuration refer to the same application, +the filter assumes that also non configured paths will be directed to this application. +E.g.:

+
apiUsageMonitoring.custom.my-app.{unknown}.{unknown}.GET.{no-match}.*.*.http_count
+
+

originMarker

+

This filter is used to measure the time it took to create a route. Other than that, it’s a no-op. +You can include the same origin marker when you re-create the route. As long as the origin and id are the same, the route creation time will not be measured again. +If there are multiple origin markers with the same origin, the earliest timestamp will be used.

+

Parameters:

+
    +
  • the name of the origin
  • +
  • the ID of the object that is the logical source for the route
  • +
  • the creation timestamp (rfc3339)
  • +
+

Example:

+
originMarker("apiUsageMonitoring", "deployment1", "2019-08-30T09:55:51Z")
+
+

Scheduler

+

fifo

+

This Filter is similar to the lifo filter in regards to +parameters and status codes.

+

It turned out that lifo() filter can hurt performance at high load. +On AWS instance c6g.8xlarge lifo filter had a limit of 21000 +requests per second on a single instance. The fifo() filter had not +hit a limit at 30000 requests per second. +If you use TCP-LIFO, then request processing is already in LIFO style.

+

Parameters:

+
    +
  • MaxConcurrency specifies how many goroutines are allowed to work on this queue (int)
  • +
  • MaxQueueSize sets the queue size (int)
  • +
  • Timeout sets the timeout to get request scheduled (time)
  • +
+

Example:

+
fifo(100, 150, "10s")
+
+

fifoWithBody

+

This Filter is similar to the lifo filter in regards to +parameters and status codes. +Performance considerations are similar to fifo.

+

The difference between fifo and fifoWithBody is that fifo will decrement +the concurrency as soon as the backend sent response headers and +fifoWithBody will decrement the concurrency if the response body was +served. Normally both are very similar, but if you have a fully async +component that serves multiple website fragments, this would decrement +concurrency too early.

+

Parameters:

+
    +
  • MaxConcurrency specifies how many goroutines are allowed to work on this queue (int)
  • +
  • MaxQueueSize sets the queue size (int)
  • +
  • Timeout sets the timeout to get request scheduled (time)
  • +
+

Example:

+
fifoWithBody(100, 150, "10s")
+
+

lifo

+

This Filter changes skipper to handle the route with a bounded last in +first out queue (LIFO), instead of an unbounded first in first out +queue (FIFO). The default skipper scheduler is based on Go net/http +package, which provides an unbounded FIFO request handling. If you +enable this filter the request scheduling will change to a LIFO. The +idea of a LIFO queue is based on Dropbox bandaid proxy, which is not +opensource. Dropbox shared their idea in a +public blogpost. +All bounded scheduler filters will respond requests with server status error +codes in case of overrun.

+

All scheduler filters return HTTP status code:

+
    +
  • 502, if the specified timeout is reached, because a request could not be scheduled fast enough
  • +
  • 503, if the queue is full
  • +
+

Parameters:

+
    +
  • MaxConcurrency specifies how many goroutines are allowed to work on this queue(int)
  • +
  • MaxQueueSize sets the queue size (int)
  • +
  • Timeout sets the timeout to get request scheduled (time)
  • +
+

Example:

+
lifo(100, 150, "10s")
+
+

The above configuration will set MaxConcurrency to 100, MaxQueueSize +to 150 and Timeout to 10 seconds.

+

When there are multiple lifo filters on the route, only the last one will be applied.

+

lifoGroup

+

This filter is similar to the lifo filter.

+

Parameters:

+
    +
  • GroupName to group one or many routes to the same queue, which have to have the same settings (string)
  • +
  • MaxConcurrency specifies how many goroutines are allowed to work on this queue(int)
  • +
  • MaxQueueSize sets the queue size (int)
  • +
  • Timeout sets the timeout to get request scheduled (time)
  • +
+

Example:

+
lifoGroup("mygroup", 100, 150, "10s")
+
+

The above configuration will set MaxConcurrency to 100, MaxQueueSize +to 150 and Timeout to 10 seconds for the lifoGroup “mygroup”, that can +be shared between multiple routes.

+

It is enough to set the concurrency, queue size and timeout parameters for one instance of +the filter in the group, and only the group name for the rest. Setting these values for +multiple instances is fine, too. While only one of them will be used as the source for the +applied settings, if there is accidentally a difference between the settings in the same +group, a warning will be logged.

+

It is possible to use the lifoGroup filter together with the single lifo filter, e.g. if a route belongs to a group, but needs to have additional stricter settings than the whole group.

+

RFC Compliance

+

rfcHost

+

This filter removes the optional trailing dot in the outgoing host +header.

+

Example:

+
rfcHost()
+
+

rfcPath

+

This filter forces an alternative interpretation of the RFC 2616 and RFC 3986 standards, +where paths containing reserved characters will have these characters unescaped when the +incoming request also has them unescaped.

+

Example:

+
Path("/api/*id") -> rfcPath() -> "http://api-backend"
+
+

In the above case, if the incoming request has something like foo%2Fbar in the id +position, the api-backend service will also receive it in the format foo%2Fbar, while +without the rfcPath() filter the outgoing request path will become /api/foo/bar.

+

In case we want to use the id while routing the request, we can use the <loopback> backend. Example:

+
api: Path("/api/:id") -> setPath("/api/${id}/summary") -> "http://api-backend";
+patch: Path("/api/*id") -> rfcPath() -> <loopback>;
+
+

In the above case, if the incoming request path is /api/foo%2Fbar, it will match +the ‘patch’ route, and then the patched request will match the api route, and +the api-backend service will receive a request with the path /api/foo%2Fbar/summary.

+

It is also possible to enable this behavior centrally for a Skipper instance with +the -rfc-patch-path flag. See +URI standards interpretation.

+

Egress

+

setRequestHeaderFromSecret

+

This filter sets request header to the secret value with optional prefix and suffix. +This is only for use cases using skipper as sidecar to inject tokens for the application on the +egress path, if it’s used in the ingress path you likely +create a security issue for your application.

+

This filter should be used as an egress only feature.

+

Parameters:

+
    +
  • header name (string)
  • +
  • secret name (string)
  • +
  • value prefix (string) - optional
  • +
  • value suffix (string) - optional
  • +
+

Example:

+
egress1: Method("GET") -> setRequestHeaderFromSecret("Authorization", "/tmp/secrets/get-token") -> "https://api.example.com";
+egress2: Method("POST") -> setRequestHeaderFromSecret("Authorization", "/tmp/secrets/post-token", "foo-") -> "https://api.example.com";
+egress3: Method("PUT") -> setRequestHeaderFromSecret("X-Secret", "/tmp/secrets/put-token", "bar-", "-baz") -> "https://api.example.com";
+
+

To use the setRequestHeaderFromSecret filter you need to run skipper with -credentials-paths=/tmp/secrets and specify an update interval -credentials-update-interval=10s. The credentials path can be a directory, in which case all files directly within it are found, but subtrees are not walked. For the example case, there have to be get-token, post-token and put-token files within the specified credentials path /tmp/secrets/, resulting in /tmp/secrets/get-token, /tmp/secrets/post-token and /tmp/secrets/put-token.

+

bearerinjector

+

This filter injects Bearer tokens into Authorization headers read +from file providing the token as content.

+

It is a special form of setRequestHeaderFromSecret with "Authorization" header name, +"Bearer " prefix and empty suffix.

+

Example:

+
egress: * -> bearerinjector("/tmp/secrets/my-token") -> "https://api.example.com";
+
+// equivalent to setRequestHeaderFromSecret("Authorization", "/tmp/secrets/my-token", "Bearer ")
+
+

Open Tracing

+

tracingBaggageToTag

+

This filter adds an opentracing tag for a given baggage item in the trace.

+

Syntax: +

tracingBaggageToTag("<baggage_item_name>", "<tag_name>")
+

+

Example: If a trace consists of baggage item named foo with a value bar. Adding below filter will add a tag named baz with value bar +

tracingBaggageToTag("foo", "baz")
+

+

stateBagToTag

+

This filter sets an opentracing tag from the filter context (state bag). +If the provided key (first parameter) cannot be found in the state bag, then it doesn’t set the tag.

+

Parameters:

+
    +
  • key in the state bag (string)
  • +
  • tag name (string)
  • +
+

The route in the following example checks whether the request is authorized with the +oauthTokeninfoAllScope() filter. This filter stores the authenticated user with “auth-user” +key in the context, and the stateBagToTag() filter in the next step stores it in +the opentracing tag “client_id”:

+
foo: * -> oauthTokeninfoAllScope("address_service.all") -> stateBagToTag("auth-user", "client_id") -> "https://backend.example.org";
+
+

tracingTag

+

This filter adds an opentracing tag.

+

Syntax: +

tracingTag("<tag_name>", "<tag_value>")
+

+

Tag value may contain template placeholders. +If a template placeholder can’t be resolved then filter does not set the tag.

+

Example: Adding the below filter will add a tag named foo with the value bar. +

tracingTag("foo", "bar")
+

+

Example: Set tag from request header +

tracingTag("http.flow_id", "${request.header.X-Flow-Id}")
+

+

tracingTagFromResponse

+

This filter works just like tracingTag, but is applied after the request was processed. In particular, template placeholders referencing the response can be used in the parameters.

+

tracingSpanName

+

This filter sets the name of the outgoing (client span) in opentracing. The default name is “proxy”. Example:

+
tracingSpanName("api-operation")
+
+

Load Balancing

+

Some filters influence how load balancing will be done

+

fadeIn

+

When this filter is set, and the route has a load balanced backend using a supported algorithm, then the newly added endpoints will receive the traffic in a gradually increasing way, starting from their detection for the specified duration, after which they receive an equal amount of traffic as the previously existing routes. The detection time of a load balanced backend endpoint is preserved over multiple generations of the route configuration (over route changes). This filter can be used to saturate the load of autoscaling applications that require a warm-up time and therefore a smooth ramp-up. The fade-in feature can be used together with the roundRobin, random or consistentHash LB algorithms.

+

While the default fade-in curve is linear, the optional exponent parameter can be used to adjust the shape of +the fade-in curve, based on the following equation:

+

current_rate = proportional_rate * min((now - detected) / duration, 1) ^ exponent

+

Parameters:

+
    +
  • duration: duration of the fade-in in milliseconds or as a duration string
  • +
  • fade-in curve exponent - optional: a floating point number, default: 1
  • +
+

Examples:

+
fadeIn("3m")
+fadeIn("3m", 1.5)
+
+

Warning on fadeIn and Rolling Restarts

+

Traffic fade-in has the potential to skew the traffic to your backend pods in case of a rolling restart (kubectl rollout restart), because it is very likely that the rolling restart is going faster than the fade-in duration. The image below shows an example of a rolling restart for a four-pod deployment (A, B, C, D) into (E, F, G, H), and the traffic share of each pod over time. While the ramp-up of the new pods is ongoing, the remaining old pods will receive a largely increased traffic share (especially the last one, D in this example), as well as an over-proportional traffic share for the first pod in the rollout (E).

+

To make rolling restarts safe, you need to slow them down by setting spec.minReadySeconds on the pod spec +of your deployment or stackset, according to your fadeIn duration.

+

Rolling Restart and Fade-In

+

endpointCreated

+

This filter marks the creation time of a load balanced endpoint. When used together with the fadeIn filter, it prevents missing the detection of a new backend instance with the same hostname. This filter is typically automatically appended, and its parameters are based on external sources, e.g. the Kubernetes API.

+

Parameters:

+
    +
  • the address of the endpoint
  • +
  • timestamp, either as a number of seconds since the unix epoch, or a string in RFC3339 format
  • +
+

Example:

+
endpointCreated("http://10.0.0.1:8080", "2020-12-18T15:30:00+01:00")
+
+

consistentHashKey

+

This filter sets the request key used by the consistentHash algorithm to select the backend endpoint.

+

Parameters:

+
    +
  • key (string)
  • +
+

The key should contain template placeholders; without placeholders the key is constant and therefore all requests would be made to the same endpoint. The algorithm will use the default key if any of the template placeholders can't be resolved.

+

Examples:

+

pr: Path("/products/:productId")
+    -> consistentHashKey("${productId}")
+    -> <consistentHash, "http://127.0.0.1:9998", "http://127.0.0.1:9997">;
+
+
consistentHashKey("${request.header.Authorization}")
+consistentHashKey("${request.source}") // same as the default key
+

+

consistentHashBalanceFactor

+

This filter sets the balance factor used by the consistentHash algorithm to prevent a single backend endpoint from being overloaded. +The number of in-flight requests for an endpoint can be no higher than (average-in-flight-requests * balanceFactor) + 1. +This is helpful in the case where certain keys are very popular and threaten to overload the endpoint they are mapped to. +Further Details.

+

Parameters:

+
    +
  • balanceFactor: A float or int, must be >= 1
  • +
+

Examples:

+

pr: Path("/products/:productId")
+    -> consistentHashKey("${productId}")
+    -> consistentHashBalanceFactor(1.25)
+    -> <consistentHash, "http://127.0.0.1:9998", "http://127.0.0.1:9997">;
+
+
consistentHashBalanceFactor(3)
+

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/reference/plugins/index.html b/reference/plugins/index.html new file mode 100644 index 0000000000..f517afdd87 --- /dev/null +++ b/reference/plugins/index.html @@ -0,0 +1,1850 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Plugins - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Skipper plugins

+

Skipper may be extended with functionality not present in the core. +These additions can be built as go plugin, so they do not have to +be present in the main skipper repository.

+

Note the warning from Go’s plugin.go:

+
// The plugin support is currently incomplete, only supports Linux,
+// and has known bugs. Please report any issues.
+
+ +

Note the known problem of using plugins together with vendoring, best +described here:

+

https://github.com/golang/go/issues/20481

+

Plugin directories

+

Plugins are loaded from sub directories of the plugin directories. By default +the plugin directory is set to ./plugins (i.e. relative to skipper’s working +directory). An additional directory may be given with the -plugindir=/path/to/dir +option to skipper.

+

Any file with the suffix .so found below the plugin directories (also in sub directories) will be attempted to be loaded without any arguments. When a plugin needs an argument, it must be explicitly loaded and the arguments passed, e.g. with -filter-plugin geoip,db=/path/to/db.

+

Building a plugin

+

Each plugin should be built with Go version >= 1.11, enabled Go +modules support similar to the following build command line:

+
go build -buildmode=plugin -o example.so example.go
+
+

There are some pitfalls:

+
    +
  • packages which are shared between skipper and the plugin must not be in + a vendor/ directory, otherwise the plugin will fail to load or in some + cases give wrong results (e.g. an opentracing span cannot be found in the + context even if it is present). This also means: + Do not vendor skipper in a plugin repo…
  • +
  • plugins must be rebuilt when skipper is rebuilt
  • +
  • do not attempt to rebuild a module and copy it over a loaded plugin, that + will crash skipper immediately…
  • +
+

Use a plugin

+

In this example we use a geoip database, that you need to find and download. +We expect that you did a git clone git@github.com:zalando/skipper.git and +entered the directory.

+

Build skipper:

+
% make skipper
+
+

Install filter plugins:

+
% mkdir plugins
+% git clone git@github.com:skipper-plugins/filters.git plugins/filters
+% ls plugins/filters
+geoip/  glide.lock  glide.yaml  ldapauth/  Makefile  noop/  plugin_test.go
+% cd plugins/filters/geoip
+% go build -buildmode=plugin -o geoip.so geoip.go
+% cd -
+~/go/src/github.com/zalando/skipper
+
+

Start a pseudo backend that shows all headers in plain:

+
% nc -l 9000
+
+

Run the proxy with geoip database:

+
% ./bin/skipper -filter-plugin geoip,db=$HOME/Downloads/GeoLite2-City_20181127/GeoLite2-City.mmdb -inline-routes '* -> geoip() -> "http://127.0.0.1:9000"'
+[APP]INFO[0000] found plugin geoip at plugins/filters/geoip/geoip.so
+[APP]INFO[0000] loaded plugin geoip (geoip) from plugins/filters/geoip/geoip.so
+[APP]INFO[0000] attempting to load plugin from plugins/filters/geoip/geoip.so
+[APP]INFO[0000] plugin geoip already loaded with InitFilter
+[APP]INFO[0000] Expose metrics in codahale format
+[APP]INFO[0000] support listener on :9911
+[APP]INFO[0000] proxy listener on :9090
+[APP]INFO[0000] route settings, reset, route: : * -> geoip() -> "http://127.0.0.1:9000"
+[APP]INFO[0000] certPathTLS or keyPathTLS not found, defaulting to HTTP
+[APP]INFO[0000] route settings received
+[APP]INFO[0000] route settings applied
+
+

Or passing a yaml file via config-file flag:

+
inline-routes: '* -> geoip() -> "http://127.0.0.1:9000"'
+filter-plugin:
+  geoip:
+    - db=$HOME/Downloads/GeoLite2-City_20181127/GeoLite2-City.mmdb
+
+

Use a client to lookup geoip:

+
% curl -H"X-Forwarded-For: 107.12.53.5" localhost:9090/
+^C
+
+

pseudo backend should show X-Geoip-Country header:

+
# nc -l 9000
+GET / HTTP/1.1
+Host: 127.0.0.1:9000
+User-Agent: curl/7.49.0
+Accept: */*
+X-Forwarded-For: 107.12.53.5
+X-Geoip-Country: US
+Accept-Encoding: gzip
+^C
+
+

skipper should show additional log lines, because of the CTRL-C:

+
[APP]ERRO[0082] error while proxying, route  with backend http://127.0.0.1:9000, status code 500: dialing failed false: EOF
+107.12.53.5 - - [28/Nov/2018:14:39:40 +0100] "GET / HTTP/1.1" 500 22 "-" "curl/7.49.0" 2753 localhost:9090 - -
+
+

Filter plugins

+

All plugins must have a function named InitFilter with the following signature

+
func([]string) (filters.Spec, error)
+
+ +

The parameters passed are all arguments for the plugin, i.e. everything after the first +word from skipper’s -filter-plugin parameter. E.g. when the -filter-plugin +parameter is

+
myfilter,datafile=/path/to/file,foo=bar
+
+ +

the myfilter plugin will receive

+
[]string{"datafile=/path/to/file", "foo=bar"}
+
+ +

as arguments.

+

The filter plugin implementation is responsible to parse the received arguments.

+

Filter plugins can be found in the filter repo

+

Example filter plugin

+

An example noop plugin looks like

+
package main
+
+import (
+    "github.com/zalando/skipper/filters"
+)
+
+type noopSpec struct{}
+
+func InitFilter(opts []string) (filters.Spec, error) {
+    return noopSpec{}, nil
+}
+
+func (s noopSpec) Name() string {
+    return "noop"
+}
+func (s noopSpec) CreateFilter(config []interface{}) (filters.Filter, error) {
+    return noopFilter{}, nil
+}
+
+type noopFilter struct{}
+
+func (f noopFilter) Request(filters.FilterContext)  {}
+func (f noopFilter) Response(filters.FilterContext) {}
+
+

Predicate plugins

+

All plugins must have a function named InitPredicate with the following signature

+
func([]string) (routing.PredicateSpec, error)
+
+ +

The parameters passed are all arguments for the plugin, i.e. everything after the first +word from skipper’s -predicate-plugin parameter. E.g. when the -predicate-plugin +parameter is

+
mypred,datafile=/path/to/file,foo=bar
+
+ +

the mypred plugin will receive

+
[]string{"datafile=/path/to/file", "foo=bar"}
+
+ +

as arguments.

+

The predicate plugin implementation is responsible to parse the received arguments.

+

Predicate plugins can be found in the predicate repo

+

Example predicate plugin

+

An example MatchAll plugin looks like

+
package main
+
+import (
+    "github.com/zalando/skipper/routing"
+    "net/http"
+)
+
+type noopSpec struct{}
+
+func InitPredicate(opts []string) (routing.PredicateSpec, error) {
+    return noopSpec{}, nil
+}
+
+func (s noopSpec) Name() string {
+    return "MatchAll"
+}
+func (s noopSpec) Create(config []interface{}) (routing.Predicate, error) {
+    return noopPredicate{}, nil
+}
+
+type noopPredicate struct{}
+
+func (p noopPredicate) Match(*http.Request) bool {
+    return true
+}
+
+

DataClient plugins

+

Similar to the above predicate and filter plugins. The command line option for data +client plugins is -dataclient-plugin. The module must have a InitDataClient +function with the signature

+
func([]string) (routing.DataClient, error)
+
+ +

A noop data client looks like

+
package main
+
+import (
+    "github.com/zalando/skipper/eskip"
+    "github.com/zalando/skipper/routing"
+)
+
+func InitDataClient([]string) (routing.DataClient, error) {
+    var dc DataClient = ""
+    return dc, nil
+}
+
+type DataClient string
+
+func (dc DataClient) LoadAll() ([]*eskip.Route, error) {
+    return eskip.Parse(string(dc))
+}
+
+func (dc DataClient) LoadUpdate() ([]*eskip.Route, []string, error) {
+    return nil, nil, nil
+}
+
+

MultiType plugins

+

Sometimes it is necessary to combine multiple plugin types into one module. This can +be done with this kind of plugin. Note that these modules are not auto loaded, these +need an explicit -multi-plugin name,arg1,arg2 command line switch for skipper.

+

The module must have a InitPlugin function with the signature

+
func([]string) ([]filters.Spec, []routing.PredicateSpec, []routing.DataClient, error)
+
+ +

Any of the returned types may be nil, so you can have e.g. a combined filter / data client +plugin or share a filter and a predicate, e.g. like

+
package main
+
+import (
+    "fmt"
+    "net"
+    "net/http"
+    "strconv"
+    "strings"
+
+    ot "github.com/opentracing/opentracing-go"
+    maxminddb "github.com/oschwald/maxminddb-golang"
+
+    "github.com/zalando/skipper/filters"
+    snet "github.com/zalando/skipper/net"
+    "github.com/zalando/skipper/predicates"
+    "github.com/zalando/skipper/routing"
+)
+
+type geoipSpec struct {
+    db   *maxminddb.Reader
+    name string
+}
+
+func InitPlugin(opts []string) ([]filters.Spec, []routing.PredicateSpec, []routing.DataClient, error) {
+    var db string
+    for _, o := range opts {
+        switch {
+        case strings.HasPrefix(o, "db="):
+            db = o[3:]
+        }
+    }
+    if db == "" {
+        return nil, nil, nil, fmt.Errorf("missing db= parameter for geoip plugin")
+    }
+    reader, err := maxminddb.Open(db)
+    if err != nil {
+        return nil, nil, nil, fmt.Errorf("failed to open db %s: %s", db, err)
+    }
+
+    return []filters.Spec{&geoipSpec{db: reader, name: "geoip"}},
+        []routing.PredicateSpec{&geoipSpec{db: reader, name: "GeoIP"}},
+        nil,
+        nil
+}
+
+func (s *geoipSpec) Name() string {
+    return s.name
+}
+
+func (s *geoipSpec) CreateFilter(config []interface{}) (filters.Filter, error) {
+    var fromLast bool
+    header := "X-GeoIP-Country"
+    var err error
+    for _, c := range config {
+        if s, ok := c.(string); ok {
+            switch {
+            case strings.HasPrefix(s, "from_last="):
+                fromLast, err = strconv.ParseBool(s[10:])
+                if err != nil {
+                    return nil, filters.ErrInvalidFilterParameters
+                }
+            case strings.HasPrefix(s, "header="):
+                header = s[7:]
+            }
+        }
+    }
+    return &geoip{db: s.db, fromLast: fromLast, header: header}, nil
+}
+
+func (s *geoipSpec) Create(config []interface{}) (routing.Predicate, error) {
+    var fromLast bool
+    var err error
+    countries := make(map[string]struct{})
+    for _, c := range config {
+        if s, ok := c.(string); ok {
+            switch {
+            case strings.HasPrefix(s, "from_last="):
+                fromLast, err = strconv.ParseBool(s[10:])
+                if err != nil {
+                    return nil, predicates.ErrInvalidPredicateParameters
+                }
+            default:
+                countries[strings.ToUpper(s)] = struct{}{}
+            }
+        }
+    }
+    return &geoip{db: s.db, fromLast: fromLast, countries: countries}, nil
+}
+
+type geoip struct {
+    db        *maxminddb.Reader
+    fromLast  bool
+    header    string
+    countries map[string]struct{}
+}
+
+type countryRecord struct {
+    Country struct {
+        ISOCode string `maxminddb:"iso_code"`
+    } `maxminddb:"country"`
+}
+
+func (g *geoip) lookup(r *http.Request) string {
+    var src net.IP
+    if g.fromLast {
+        src = snet.RemoteHostFromLast(r)
+    } else {
+        src = snet.RemoteHost(r)
+    }
+
+    record := countryRecord{}
+    err := g.db.Lookup(src, &record)
+    if err != nil {
+        fmt.Printf("geoip(): failed to lookup %s: %s", src, err)
+    }
+    if record.Country.ISOCode == "" {
+        return "UNKNOWN"
+    }
+    return record.Country.ISOCode
+}
+
+func (g *geoip) Request(c filters.FilterContext) {
+    c.Request().Header.Set(g.header, g.lookup(c.Request()))
+}
+
+func (g *geoip) Response(c filters.FilterContext) {}
+
+func (g *geoip) Match(r *http.Request) bool {
+    span := ot.SpanFromContext(r.Context())
+    if span != nil {
+        span.LogKV("GeoIP", "start")
+    }
+
+    code := g.lookup(r)
+    _, ok := g.countries[code]
+
+    if span != nil {
+        span.LogKV("GeoIP", code)
+    }
+    return ok
+}
+
+

OpenTracing plugins

+

The tracers, except for noop, are built as Go Plugins. A tracing plugin can +be loaded with -opentracing NAME as parameter to skipper.

+

Implementations of OpenTracing API can be found in the +https://github.com/skipper-plugins/opentracing repository.

+

All plugins must have a function named InitTracer with the following signature

+
func([]string) (opentracing.Tracer, error)
+
+ +

The parameters passed are all arguments for the plugin, i.e. everything after the first +word from skipper’s -opentracing parameter. E.g. when the -opentracing parameter is +mytracer foo=bar token=xxx somename=bla:3 the “mytracer” plugin will receive

+
[]string{"foo=bar", "token=xxx", "somename=bla:3"}
+
+ +

as arguments.

+

The tracer plugin implementation is responsible to parse the received arguments.

+

An example plugin looks like +

package main
+
+import (
+     basic "github.com/opentracing/basictracer-go"
+     opentracing "github.com/opentracing/opentracing-go"
+)
+
+func InitTracer(opts []string) (opentracing.Tracer, error) {
+     return basic.NewTracerWithOptions(basic.Options{
+         Recorder:       basic.NewInMemoryRecorder(),
+         ShouldSample:   func(traceID uint64) bool { return traceID%64 == 0 },
+         MaxLogsPerSpan: 25,
+     }), nil
+}
+

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/reference/predicates/index.html b/reference/predicates/index.html new file mode 100644 index 0000000000..21e2488ce9 --- /dev/null +++ b/reference/predicates/index.html @@ -0,0 +1,2528 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Predicates - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Skipper Predicates

+

Predicates are used to decide which route will handle an incoming request. Routes can contain multiple +predicates. A request will match a route only if all the predicates of the route match. See the description of +the route matching mechanism here: Route matching.

+

Example route with a Host, Method and Path match predicates and a backend:

+
all: Host(/^my-host-header\.example\.org$/) && Method("GET") && Path("/hello") -> "http://127.0.0.1:1234/";
+
+

Predicate arguments

+

The predicate arguments can be strings, regular expressions or numbers (float64, int). In the eskip syntax +representation:

+
    +
  • strings are surrounded by double quotes ("). When necessary, characters can be escaped by \, e.g. \\ or \".
  • +
  • regular expressions are re2 regular expressions, surrounded by /, e.g. /^www\.example\.org(:\d+)?$/. When a predicate expects a regular expression as an argument, the string representation with double quotes can be used, as well.
  • +
  • numbers are regular (decimal) numbers like 401 or 1.23456. The eskip syntax doesn’t define a limitation on the size of the numbers, but the underlying implementation currently relies on the float64 values of the Go runtime.
  • +
+

Other higher level argument types must be represented as one of the above types. E.g. it is a convention to represent time duration values as strings, parseable by time.Duration.

+

The path tree

+

There is an important difference between the evaluation of the Path or PathSubtree predicates, and the evaluation of all the other predicates (PathRegexp belonging to the second group). Find an explanation in the Route matching explanation section.

+

Path

+

The path predicate is used to match the path in HTTP request line. It accepts a single argument, that can be a +fixed path like “/some/path”, or it can contain wildcards. There can be only zero or one path predicate in a +route.

+

Wildcards:

+

Wildcards can be put in place of one or more path segments in the path, e.g. “/some/:dir/:name”, or the path can +end with a free wildcard like "/some/path/*param", where the free wildcard can match against a sub-path with +multiple segments. Note, that this solution implicitly supports the glob standard, e.g. "/some/path/**" will +work as expected. The wildcards must follow a /.

+

The arguments are available to the filters while processing the matched requests, but +currently only a few built-in filters utilize them, and they can be used rather only from custom filter +extensions.

+

Known bug:

+

There is a known bug with how predicates of the form Path("/foo/*") are currently handled. Note the wildcard +defined with * doesn’t have a name here. Wildcards must have a name, but Skipper currently does not reject +these routes, resulting in undefined behavior.

+

Trailing slash:

+

By default, Path("/foo") and Path("/foo/") are not equivalent. Ignoring the trailing slash can be toggled +with the -ignore-trailing-slash command line flag.

+

Examples:

+
Path("/foo/bar")     //   /foo/bar
+Path("/foo/bar/")    //   /foo/bar/, unless started with -ignore-trailing-slash
+Path("/foo/:id")     //   /foo/_anything
+Path("/foo/:id/baz") //   /foo/_anything/baz
+Path("/foo/*rest")   //   /foo/bar/baz
+Path("/foo/**")      //   /foo/bar/baz
+
+

PathSubtree

+

The path subtree predicate behaves similar to the path predicate, but it matches the exact path in the +definition and any sub path below it. The subpath is automatically provided among the path parameters with the +name *. If a free wildcard is appended to the definition, e.g. PathSubtree("/some/path/*rest"), the free +wildcard name is used instead of *. The simple wildcards behave similar to the Path predicate. The main +difference between PathSubtree("/foo") and Path("/foo/**") is that the PathSubtree predicate always ignores +the trailing slashes.

+

Examples:

+
PathSubtree("/foo/bar")
+PathSubtree("/")
+PathSubtree("/foo/*rest")
+
+

PathRegexp

+

Regular expressions to match the path. It uses Go’s standard library +regexp package to match, which is based on +re2 regular expression syntax.

+

Parameters:

+
    +
  • PathRegexp (regex)
  • +
+

A route can contain more than one PathRegexp predicates. It can be also used in combination with the Path +predicate.

+
Path("/colors/:name/rgb-value") && PathRegexp("^/colors/(red|green|blue|cyan|magenta|pink|yellow)/")
+-> returnRGB()
+-> <shunt>
+
+

Further examples:

+
PathRegexp("^/foo/bar")
+PathRegexp("/foo/bar$")
+PathRegexp("/foo/bar/")
+PathRegexp("^/foo/(bar|qux)")
+
+

Host

+

Regular expressions that the host header in the request must match.

+

Parameters:

+
    +
  • Host (regex)
  • +
+

Examples:

+
Host(/^my-host-header\.example\.org$/)
+Host(/header\.example\.org$/)
+
+

HostAny

+

Evaluates to true if the request host exactly equals any of the configured hostnames.

+

Parameters:

+
    +
  • hostnames (string)
  • +
+

Examples:

+
HostAny("www.example.org", "www.example.com")
+HostAny("localhost:9090")
+
+

Forwarded header predicates

+

Uses standardized Forwarded header (RFC 7239)

+

More info about the header: MDN

+

If multiple proxies chain values in the header, as a comma separated list, the predicates below will only match +the last value in the chain for each part of the header.

+

Example: Forwarded: host=example.com;proto=https, host=example.org

+
    +
  • ForwardedHost(/^example\.com$/) - does not match
  • +
  • ForwardedHost(/^example\.org$/) - matches
  • +
  • ForwardedHost(/^example\.org$/) && ForwardedProto("https") - matches
  • +
  • ForwardedHost(/^example\.com$/) && ForwardedProto("https") - does not match
  • +
+

ForwardedHost

+

Regular expressions that the forwarded host header in the request must match.

+

Parameters:

+
    +
  • Host (regex)
  • +
+

Examples:

+
ForwardedHost(/^my-host-header\.example\.org$/)
+ForwardedHost(/header\.example\.org$/)
+
+

ForwardedProtocol

+

Protocol the forwarded header in the request must match.

+

Parameters:

+
    +
  • Protocol (string)
  • +
+

Only “http” and “https” values are allowed

+

Examples:

+
ForwardedProtocol("http")
+ForwardedProtocol("https")
+
+

Weight

+

By default, the weight (priority) of a route is determined by the number of defined predicates.

+

If you want to give a route more priority, you can give it more weight.

+

Parameters:

+
    +
  • Weight (int)
  • +
+

Example where route2 has more priority because it has more predicates:

+
route1: Path("/test") -> "http://www.zalando.de";
+route2: Path("/test") && True() -> "http://www.zalando.de";
+
+

Example where route1 has more priority because it has more weight:

+
route1: Path("/test") && Weight(100) -> "http://www.zalando.de";
+route2: Path("/test") && True() && True() -> "http://www.zalando.de";
+
+

True

+

Does always match. Before Weight predicate existed this was used to give a route more weight.

+

Example where route2 has more weight.

+
route1: Path("/test") -> "http://www.zalando.de";
+route2: Path("/test") && True() -> "http://www.github.com";
+
+

False

+

Does not match. Can be used to disable certain routes.

+

Example where route2 is disabled.

+
route1: Path("/test") -> "http://www.zalando.de";
+route2: Path("/test") && False() -> "http://www.github.com";
+
+

Shutdown

+

Evaluates to true if Skipper is shutting down. Can be used to create customized healthcheck.

+
health_up: Path("/health") -> inlineContent("OK") -> <shunt>;
+health_down: Path("/health") && Shutdown() -> status(503) -> inlineContent("shutdown") -> <shunt>;
+
+

Method

+

The HTTP method that the request must match. HTTP methods are one of +GET, HEAD, PATCH, POST, PUT, DELETE, OPTIONS, CONNECT, TRACE.

+

Parameters:

+
    +
  • Method (string)
  • +
+

Examples:

+
Method("GET")
+Method("OPTIONS")
+
+

Methods

+

The HTTP method that the request must match. HTTP methods are one of +GET, HEAD, PATCH, POST, PUT, DELETE, OPTIONS, CONNECT, TRACE.

+

Parameters:

+
    +
  • Method (…string) methods names
  • +
+

Examples:

+
Methods("GET")
+Methods("OPTIONS", "POST")
+Methods("OPTIONS", "POST", "patch")
+
+ +

A header key and exact value that must be present in the request. Note +that Header(“Key”, “Value”) is equivalent to HeaderRegexp(“Key”, “^Value$”).

+

Parameters:

+
    +
  • Header (string, string)
  • +
+

Examples:

+
Header("X-Forwarded-For", "192.168.0.2")
+Header("Accept", "application/json")
+
+

HeaderRegexp

+

A header key and a regular expression, where the key must be present +in the request and one of the associated values must match the +expression.

+

Parameters:

+
    +
  • HeaderRegexp (string, regex)
  • +
+

Examples:

+
HeaderRegexp("X-Forwarded-For", "^192\.168\.0\.[0-2]?[0-9]?[0-9] ")
+HeaderRegexp("Accept", "application/(json|xml)")
+
+ +

Matches if the specified cookie is set in the request.

+

Parameters:

+
    +
  • Cookie (string, regex) name and value match
  • +
+

Examples:

+
Cookie("alpha", /^enabled$/)
+
+

Auth

+

Authorization header based match.

+

JWTPayloadAnyKV

+

Match the route if at least one of the base64 decoded JWT content +matches the key value configuration.

+

Parameters:

+
    +
  • Key-Value pairs (…string), odd index is the key of the JWT + content and even index is the value of the JWT content
  • +
+

Examples:

+
JWTPayloadAnyKV("iss", "https://accounts.google.com")
+JWTPayloadAnyKV("iss", "https://accounts.google.com", "email", "skipper-router@googlegroups.com")
+
+

JWTPayloadAllKV

+

Match the route if all of the base64 decoded JWT content +matches the key value configuration.

+

Parameters:

+
    +
  • Key-Value pairs (…string), odd index is the key of the JWT + content and even index is the value of the JWT content
  • +
+

Examples:

+
JWTPayloadAllKV("iss", "https://accounts.google.com")
+JWTPayloadAllKV("iss", "https://accounts.google.com", "email", "skipper-router@googlegroups.com")
+
+

JWTPayloadAnyKVRegexp, JWTPayloadAllKVRegexp

+

Behaves exactly the same as JWTPayloadAnyKV, JWTPayloadAllKV, +but the expected values are regular expressions that will be matched +against the JWT value.

+

Examples:

+
JWTPayloadAllKVRegexp("iss", "^https://")
+JWTPayloadAnyKVRegexp("iss", "^https://")
+
+

HeaderSHA256

+

Matches if SHA-256 hash of the header value (known as pre-shared key or secret) +equals to any of the configured hash values. +Several hash values could be used to match multiple secrets e.g. during secret rotation.

+

Hash values only hide secrets from parties that have access to the source of Skipper routes. +Authentication strength depends on the strength of the secret value so e.g. +HeaderSHA256("X-Secret", "2bb80d537b1da3e38bd30361aa855686bde0eacd7162fef6a25fe97bf527a25b") +is not stronger than just Header("X-Secret", "secret").

+

The secret value must be kept secret, must be used by a single client and must be rotated periodically. +See below how to generate random secret value using OpenSSL.

+

Parameters:

+
    +
  • header name (string)
  • +
  • one or more hex-encoded SHA-256 hashes of the matching header values (string)
  • +
+

Secure secret value example: +

#
+# 1. Generate cryptographically secure pseudo random secret header value:
+# - length of at least 32 bytes (the size of the SHA-256 output)
+# - encode as -base64 or -hex to get ASCII text value
+#
+SECRET=$(openssl rand -base64 32)
+echo $SECRET
+3YchPsliGjBXvyl/ncLWEI8/loKGrj/VNM4garxWEmA=
+
+#
+# 2. Get SHA-256 hash of the secret header value to use as HeaderSHA256 argument:
+# - use echo -n to not output the trailing newline
+#
+echo -n $SECRET | sha256sum
+a6131ba920df753c8109500cc11818f7192336d06532f6fa13009c2e4f6e1841  -
+
+
// 3. Configure route to match hash of the secret value
+HeaderSHA256(
+    "X-Secret",
+    "a6131ba920df753c8109500cc11818f7192336d06532f6fa13009c2e4f6e1841"
+) -> inlineContent("ok\n") -> <shunt>
+
+
# 4. Test secret value
+curl -H "X-Secret: $SECRET" http://localhost:9090
+

+

Secret rotation example: +

// To rotate secret:
+// * add new secret - both old and new secrets match during rotation
+// * update client to use new secret
+// * remove old secret
+HeaderSHA256(
+    "X-Secret",
+    "cba06b5736faf67e54b07b561eae94395e774c517a7d910a54369e1263ccfbd4", // SHA256("old")
+    "11507a0e2f5e69d5dfa40a62a1bd7b6ee57e6bcd85c67c9b8431b36fff21c437"  // SHA256("new")
+) -> inlineContent("ok\n") -> <shunt>
+

+

Basic access authentication example: +

anon: * -> setResponseHeader("WWW-Authenticate", `Basic realm="foo", charset="UTF-8"`) -> status(401) -> <shunt>;
+auth: HeaderSHA256(
+    "Authorization",
+    "caae07e42ed8d231a58edcde95782b0feb67186172c18c89894ce4c2174df137", // SHA256("Basic " + BASE64("test:123£"))
+    "157da8472590f0ce0a7c651bd79aecb5cc582944fcf76cbabada915d333deee8"  // SHA256("Basic " + BASE64("Aladdin:open sesame"))
+) -> inlineContent("ok\n") -> <shunt>;
+

+

Interval

+

An interval implements custom predicates to match routes only during some period of time.

+

There are three predicates: Between, Before and After. All +predicates can be created using the date represented as: +* a string in RFC3339 format (see https://golang.org/pkg/time/#pkg-constants) +* a string in RFC3339 format without numeric timezone offset and a location name corresponding to a file in the IANA Time Zone database +* an int64 or float64 number corresponding to the given Unix time in seconds since January 1, 1970 UTC. float64 number will be converted into int64 number

+

After

+

Matches if the request is after the specified time

+

Parameters:

+
    +
  • After (string) RFC3339 datetime string
  • +
  • After (string, string) RFC3339 datetime string without timezone offset, location name
  • +
  • After (int) unixtime in seconds
  • +
+

Examples:

+
After("2016-01-01T12:00:00+02:00")
+After("2021-02-18T00:00:00", "Europe/Berlin")
+After(1451642400)
+
+

Before

+

Matches if the request is before the specified time

+

Parameters:

+
    +
  • Before (string) RFC3339 datetime string
  • +
  • Before (string, string) RFC3339 datetime string without timezone offset, location name
  • +
  • Before (int) unixtime in seconds
  • +
+

Examples:

+
Before("2016-01-01T12:00:00+02:00")
+Before("2021-02-18T00:00:00", "Europe/Berlin")
+Before(1451642400)
+
+

Between

+

Matches if the request is between the specified timeframe

+

Parameters:

+
    +
  • Between (string, string) RFC3339 datetime string, from - till
  • +
  • Between (string, string, string) RFC3339 datetime string without timezone offset, from - till and a location name
  • +
  • Between (int, int) unixtime in seconds, from - till
  • +
+

Examples:

+
Between("2016-01-01T12:00:00+02:00", "2016-02-01T12:00:00+02:00")
+Between("2021-02-18T00:00:00", "2021-02-18T01:00:00", "Europe/Berlin")
+Between(1451642400, 1454320800)
+
+

Cron

+

Matches routes when the given cron-like expression matches the system time.

+

Parameters:

+
    +
  • Cron-like expression. See the package documentation for supported & unsupported features. Expressions are expected to be in the same time zone as the system that generates the time.Time instances.
  • +
+

Examples:

+
// match everything
+Cron("* * * * *")
+// match only when the hour is between 5-7 (inclusive)
+Cron("* 5-7 * * *")
+// match only when the hour is between 5-7, equal to 8, or between 12-15
+Cron("* 5-7,8,12-15 * * *")
+// match only when it is weekdays
+Cron("* * * * 1-5")
+// match only when it is weekdays & working hours
+Cron("* 7-18 * * 1-5")
+
+

QueryParam

+

Match request based on the Query Params in URL

+

Parameters:

+
    +
  • QueryParam (string) name
  • +
  • QueryParam (string, regex) name and value match
  • +
+

Examples:

+
// matches http://example.org?bb=a&query=withvalue
+QueryParam("query")
+
+// Even a query param without a value
+// matches http://example.org?bb=a&query=
+QueryParam("query")
+
+// matches with regexp
+// matches http://example.org?bb=a&query=example
+QueryParam("query", "^example$")
+
+// matches with regexp and multiple values of query param
+// matches http://example.org?bb=a&query=testing&query=example
+QueryParam("query", "^example$")
+
+

Source

+

Source implements a custom predicate to match routes based on +the source IP or X-Forwarded-For header of a request.

+

Parameters:

+
    +
  • Source (string, ..) varargs with IPs or CIDR
  • +
+

Examples:

+
// only match requests from 1.2.3.4
+Source("1.2.3.4")
+
+// only match requests from 1.2.3.0 - 1.2.3.255
+Source("1.2.3.0/24")
+
+// only match requests from 1.2.3.4 and the 2.2.2.0/24 network
+Source("1.2.3.4", "2.2.2.0/24")
+
+

SourceFromLast

+

The same as Source, but uses the last part of the X-Forwarded-For header to match the network. This is mainly useful behind the popular AWS load balancers (ELB and ALB), because they put the client IP as the last part of the X-Forwarded-For header.

+

Parameters:

+
    +
  • SourceFromLast (string, ..) varargs with IPs or CIDR
  • +
+

Examples:

+
SourceFromLast("1.2.3.4", "2.2.2.0/24")
+
+

ClientIP

+

ClientIP implements a custom predicate to match routes based on +the client IP of a request.

+

Parameters:

+
    +
  • ClientIP (string, ..) varargs with IPs or CIDR
  • +
+

Examples:

+
// only match requests from 1.2.3.4
+ClientIP("1.2.3.4")
+
+// only match requests from 1.2.3.0 - 1.2.3.255
+ClientIP("1.2.3.0/24")
+
+// only match requests from 1.2.3.4 and the 2.2.2.0/24 network
+ClientIP("1.2.3.4", "2.2.2.0/24")
+
+

Tee

+

The Tee predicate matches a route when a request is spawn from the +teeLoopback filter as a tee request, using +the same provided label.

+

Parameters:

+
    +
  • tee label (string): the predicate will match only those requests that + were spawn from a teeLoopback filter using the same label.
  • +
+

See also:

+ +

Traffic

+

Traffic implements a predicate to control the matching probability for +a given route by setting its weight.

+

The probability for matching a route is defined by the mandatory first +parameter, that must be a decimal number between 0.0 and 1.0 (both +inclusive).

+

The optional second argument is used to specify the cookie name for +the traffic group, in case you want to use stickiness. Stickiness +allows all subsequent requests from the same client to match the same +route. Stickiness of traffic is supported by the optional third +parameter, indicating whether the request being matched belongs to the +traffic group of the current route. If yes, the predicate matches +ignoring the chance argument.

+

Parameters:

+
    +
  • Traffic (decimal) valid values [0.0, 1.0]
  • +
  • Traffic (decimal, string, string) session stickiness
  • +
+

Examples:

+

non-sticky:

+
// hit by 10% percent chance
+v2:
+    Traffic(.1) ->
+    "https://api-test-green";
+
+// hit by remaining chance
+v1:
+    * ->
+    "https://api-test-blue";
+
+

stickiness:

+
// hit by 5% percent chance
+cartTest:
+    Traffic(.05, "cart-test", "test") && Path("/cart") ->
+    responseCookie("cart-test", "test") ->
+    "https://cart-test";
+
+// hit by remaining chance
+cart:
+    Path("/cart") ->
+    responseCookie("cart-test", "default") ->
+    "https://cart";
+
+// hit by 15% percent chance
+catalogTestA:
+    Traffic(.15, "catalog-test", "A") ->
+    responseCookie("catalog-test", "A") ->
+    "https://catalog-test-a";
+
+// hit by 30% percent chance
+catalogTestB:
+    Traffic(.3, "catalog-test", "B") ->
+    responseCookie("catalog-test", "B") ->
+    "https://catalog-test-b";
+
+// hit by remaining chance
+catalog:
+    * ->
+    responseCookie("catalog-test", "default") ->
+    "https://catalog";
+
+

TrafficSegment

+

TrafficSegment predicate requires two number arguments \(min\) and \(max\) +from an interval \([0, 1]\) (from zero included to one included) and \(min <= max\).

+

Let \(r\) be one-per-request uniform random number value from \([0, 1)\). +TrafficSegment matches if \(r\) belongs to an interval from \([min, max)\). +Upper interval boundary \(max\) is excluded to simplify definition of +adjacent intervals - the upper boundary of the first interval +then equals lower boundary of the next and so on, e.g. \([0, 0.25)\) and \([0.25, 1)\).

+

This predicate has weight of -1 and therefore does not affect route weight.

+

Parameters:

+
    +
  • min (decimal) from an interval [0, 1]
  • +
  • max (decimal) from an interval [0, 1], min <= max
  • +
+

Example of routes splitting traffic in 50%+30%+20% proportion:

+
r50: Path("/test") && TrafficSegment(0.0, 0.5) -> <shunt>;
+r30: Path("/test") && TrafficSegment(0.5, 0.8) -> <shunt>;
+r20: Path("/test") && TrafficSegment(0.8, 1.0) -> <shunt>;
+
+

ContentLengthBetween

+

The ContentLengthBetween predicate matches a route when a request content length header value is between min and max provided values. +In case the client does not specify the content length value then the predicate will not match.

+

Parameters:

+
    +
  • min (int): the lower bound (inclusive) for the content length check. The value must be greater than or equal to 0.
  • +
  • max (int): the upper bound (exclusive) for the content length check. The value must be greater than min.
  • +
+

Examples:

+
// matches the range from 0 to 999
+ContentLengthBetween(0, 1000)
+
+// matches the range from 1000 to 9999
+ContentLengthBetween(1000, 10000)
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/reference/scripts/index.html b/reference/scripts/index.html new file mode 100644 index 0000000000..47ade17c71 --- /dev/null +++ b/reference/scripts/index.html @@ -0,0 +1,1705 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Scripts - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ +
+
+ + + +
+
+ + + + + + + +

Lua filter scripts

+

LUA scripts can be used as filters in skipper. The +current implementation supports Lua 5.1.

+

Route filters

+

The lua scripts can be added to a route description with the lua() filter, +the first parameter for the filter is the script. This can be either a file +name (ending with .lua) or inline code, e.g. as

+
    +
  • file lua("/path/to/file.lua") - if a file path is not absolute, the path + is relative to skipper’s working directory.
  • +
  • inline lua("function request(c, p); print(c.request.url); end")
  • +
+

Any other additional parameters for the filter will be passed as +a second table parameter to the called functions.

+
+

Any parameter starting with “lua-” should not be used to pass +values for the script - those will be used for configuring the filter.

+
+

Script requirements

+

A filter script needs at least one global function: request or response. +If present, they are called with a skipper filter context and the params passed +in the route as table like +

-- route looks like
+--
+-- any: * -> lua("./test.lua", "myparam=foo", "other=bar", "justkey") -> <shunt>
+--
+function request(ctx, params)
+    print(params[1])      -- myparam=foo
+    print(params[2])      -- other=bar
+    print(params[3])      -- justkey
+    print(params[4])      -- nil
+    print(params.myparam) -- foo
+    print(params.other)   -- bar
+    print(params.justkey) -- (empty string)
+    print(params.x)       -- nil
+end
+

+
+

Parameter table allows index access as well as key-value access

+
+ +

Lua print builtin function writes skipper info log messages.

+

sleep

+

sleep(number) function pauses execution for at least number milliseconds. A negative or zero duration causes sleep to return immediately.

+

Enable and Disable lua sources

+

The flag -lua-sources allows to set 5 different values:

+
    +
  • “file”: Allows to use reference to file for scripts
  • +
  • “inline”: Allows to use inline scripts
  • +
  • “inline”, “file”: Allows to use reference to file and inline scripts
  • +
  • “none”: Disable Lua filters
  • +
  • "" (empty string): the same as “inline”, “file”, the default value for binary and library users
  • +
+

Available lua modules

+

Besides the standard modules - except +for debug - the following additional modules have been preloaded and can be used with e.g. +local http = require("http"), see also the examples below

+ +

For differences between the standard modules and the gopher-lua implementation +check the gopher-lua documentation.

+

Any other module can be loaded in non-byte code form from the lua path (by default +for require("mod") this is ./mod.lua, /usr/local/share/lua/5.1/mod.lua and +/usr/local/share/lua/5.1/mod/init.lua).

+

You may selectively enable standard and additional Lua modules using -lua-modules flag: +

-lua-modules=package,base,json
+
+Note that preloaded additional modules require the package module.

+

For standard modules you may enable only a subset of module symbols: +

-lua-modules=base.print,base.assert
+

+

Use none to disable all modules: +

-lua-modules=none
+

+

See also http://lua-users.org/wiki/SandBoxes

+

Lua states

+

There is no guarantee that the request() and response() functions of a +lua script run in the same lua state during one request. Setting a variable +in the request and accessing it in the response will most likely fail and lead +to hard debuggable errors. Use the ctx.state_bag to propagate values from +request to response - and any other filter in the chain.

+

Request and response

+

The request() function is run for an incoming request and response() for backend response.

+

Headers

+

Request headers can be accessed via ctx.request.header table like +

ua = ctx.request.header["user-agent"]
+
+and iterated like +
for k, v in ctx.request.header() do
+    print(k, "=", v);
+end
+

+
+

Header table is a functable that returns iterator

+
+

Header names are normalized by the net/http go module +like usual. Setting a +header is done by assigning to the header table. Setting a header to nil or +an empty string deletes the header - setting to nil is preferred.

+
ctx.request.header["user-agent"] = "skipper.lua/0.0.1"
+ctx.request.header["Authorization"] = nil -- delete authorization header
+
+
+

header table returns empty string for missing keys

+
+

Response headers ctx.response.header work the same way - this is of course only valid in the response() phase.

+

Multiple header values

+

Request and response header tables provide access to a first value of a header.

+

To access multiple values use add and values methods:

+
function request(ctx, params)
+    ctx.request.header.add("X-Foo", "Bar")
+    ctx.request.header.add("X-Foo", "Baz")
+
+    -- all X-Foo values
+    for _, v in pairs(ctx.request.header.values("X-Foo")) do
+        print(v)
+    end
+
+    -- all values
+    for k, _ in ctx.request.header() do
+        for _, v in pairs(ctx.request.header.values(k)) do
+            print(k, "=", v)
+        end
+    end
+end
+
+

Other request fields

+
    +
  • backend_url - (read only) returns the backend url specified in the route + or an empty value in case it’s a shunt or loopback
  • +
  • host - (read only) the ‘Host’ header that was in the incoming + request to the proxy
  • +
  • outgoing_host - (read/write) the host that will be set for the outgoing + proxy request as the ‘Host’ header.
  • +
  • remote_addr - (read only) the remote host, usually IP:port
  • +
  • content_length - (read only) content length
  • +
  • proto - (read only) something like “HTTP/1.1”
  • +
  • method - (read only) request method, e.g. “GET” or “POST”
  • +
  • url - (read/write) request URL as string
  • +
  • url_path - (read/write) request URL path as string
  • +
  • url_query - (read/write) request URL query parameter table, similar to header table but returns nil for missing keys
  • +
  • url_raw_query - (read/write) encoded request URL query values, without ‘?’ as string
  • +
  • cookie - (read only) request cookie table, similar to header table but returns nil for missing keys
  • +
+

Other response fields

+
    +
  • status_code - (read/write) response status code as number, e.g. 200
  • +
+

Serving requests from lua

+

Requests can be served with ctx.serve(table), you must return after this +call. Possible keys for the table:

+
    +
  • status_code (number) - required (but currently not enforced)
  • +
  • header (table)
  • +
  • body (string)
  • +
+

See also redirect and internal server error +examples below

+

Path parameters

+

Path parameters (if any) can be read via ctx.path_param table +

Path("/api/:id") -> lua("function request(ctx, params); print(ctx.path_param.id); end") -> <shunt>
+

+
+

path_param table returns nil for missing keys

+
+

StateBag

+

The state bag can be used to pass string, number and table values from one filter to another in the same +chain. It is shared by all filters in one request (lua table values are only available to lua filters). +

function request(ctx, params)
+    -- the value of "mykey" will be available to all filters in the chain now:
+    ctx.state_bag["mykey"] = "foo"
+end
+
+function response(ctx, params)
+    print(ctx.state_bag["mykey"])
+end
+

+
+

state_bag table returns nil for missing keys

+
+

Examples

+
+

The examples serve as examples. If there is a go based plugin available, +use that instead. For overhead estimate see benchmark.

+
+

OAuth2 token as basic auth password

+
local base64 = require("base64")
+
+function request(ctx, params)
+    token = string.gsub(ctx.request.header["Authorization"], "^%s*[Bb]earer%s+", "", 1)
+    user = ctx.request.header["x-username"]
+    if user == "" then
+        user = params.username
+    end
+    ctx.request.header["Authorization"] = "Basic " .. base64.encode(user .. ":"  .. token)
+    -- print(ctx.request.header["Authorization"])
+end
+
+

validate token

+
local http = require("http")
+function request(ctx, params)
+    token = string.gsub(ctx.request.header["Authorization"], "^%s*[Bb]earer%s+", "", 1)
+    if token == "" then
+        ctx.serve({status_code=401, body="Missing Token"})
+        return
+    end
+
+    res, err = http.get("https://auth.example.com/oauth2/tokeninfo?access_token="..token)
+    if err ~= nil then
+        print("Failed to get tokeninfo: " .. err)
+        ctx.serve({status_code=401, body="Failed to validate token: "..err})
+        return
+    end
+    if res.status_code ~= 200 then
+        ctx.serve({status_code=401, body="Invalid token"})
+        return
+    end
+end
+
+

strip query

+
function request(ctx, params)
+    ctx.request.url = string.gsub(ctx.request.url, "%?.*$", "")
+    -- print("URL="..ctx.request.url)
+end
+
+

redirect

+
function request(ctx, params)
+    ctx.serve({
+        status_code=302,
+        header={
+            location="http://www.example.org/",
+        },
+    })
+end
+
+

internal server error

+
function request(ctx, params)
+    -- let 10% of all requests fail with 500
+    if math.random() < 0.1 then
+        ctx.serve({
+            status_code=500,
+            body="Internal Server Error.\n",
+        })
+    end
+end
+
+

set request header from params

+
function request(ctx, params)
+    ctx.request.header[params[1]] = params[2]
+    if params[1]:lower() == "host" then
+        ctx.request.outgoing_host = params[2]
+    end
+end
+
+

Benchmark

+

redirectTo vs lua redirect

+

See skptesting/benchmark-lua.sh

+

Route for “skipper” is * -> redirectTo(302, "http://localhost:9980") -> <shunt>, +route for “lua” is * -> lua("function request(c,p); c.serve({status_code=302, header={location='http://localhost:9980'}});end") -> <shunt>

+

Benchmark results +

[benchmarking skipper-redirectTo]
+Running 12s test @ http://127.0.0.1:9990/lorem.html
+  2 threads and 128 connections
+  Thread Stats   Avg      Stdev     Max   +/- Stdev
+    Latency     4.19ms    5.38ms  69.50ms   85.10%
+    Req/Sec    26.16k     2.63k   33.22k    64.58%
+  Latency Distribution
+     50%    1.85ms
+     75%    6.38ms
+     90%   11.66ms
+     99%   23.34ms
+  626122 requests in 12.04s, 91.36MB read
+Requests/sec:  51996.22
+Transfer/sec:      7.59MB
+[benchmarking skipper-redirectTo done]
+
+[benchmarking redirect-lua]
+Running 12s test @ http://127.0.0.1:9991/lorem.html
+  2 threads and 128 connections
+  Thread Stats   Avg      Stdev     Max   +/- Stdev
+    Latency     6.81ms    9.69ms 122.19ms   85.95%
+    Req/Sec    21.17k     2.83k   30.63k    73.75%
+  Latency Distribution
+     50%    2.21ms
+     75%   10.22ms
+     90%   19.88ms
+     99%   42.54ms
+  507434 requests in 12.06s, 68.72MB read
+Requests/sec:  42064.69
+Transfer/sec:      5.70MB
+[benchmarking redirect-lua done]
+
+showing that Lua performance is ~80% of native.

+

The benchmark was run with the default pool size of script.InitialPoolSize = 3; script.MaxPoolSize = 10. +With script.InitialPoolSize = 128; script.MaxPoolSize = 128 (tweaked for this benchmark) you get >95% of native performance in lua: +

[benchmarking skipper-redirectTo]
+Running 12s test @ http://127.0.0.1:9990/lorem.html
+  2 threads and 128 connections
+  Thread Stats   Avg      Stdev     Max   +/- Stdev
+    Latency     4.15ms    5.24ms  62.27ms   84.88%
+    Req/Sec    25.81k     2.64k   32.74k    70.00%
+  Latency Distribution
+     50%    1.88ms
+     75%    6.49ms
+     90%   11.43ms
+     99%   22.49ms
+  617499 requests in 12.03s, 90.10MB read
+Requests/sec:  51336.87
+Transfer/sec:      7.49MB
+[benchmarking skipper-redirectTo done]
+
+[benchmarking redirect-lua]
+Running 12s test @ http://127.0.0.1:9991/lorem.html
+  2 threads and 128 connections
+  Thread Stats   Avg      Stdev     Max   +/- Stdev
+    Latency     3.79ms    4.98ms  91.19ms   87.15%
+    Req/Sec    25.14k     4.71k   51.45k    72.38%
+  Latency Distribution
+     50%    1.61ms
+     75%    5.17ms
+     90%   10.05ms
+     99%   21.83ms
+  602630 requests in 12.10s, 81.61MB read
+Requests/sec:  49811.24
+Transfer/sec:      6.75MB
+[benchmarking redirect-lua done]
+

+

Similar results are achieved when testing stripQuery() vs the lua version from above.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 0000000000..1187f77e7f --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":"

This is the documentation page of Skipper. Skipper is an HTTP router and reverse proxy for service composition. It\u2019s designed to handle large amounts of dynamically configured HTTP route definitions (>800000 routes) with detailed lookup conditions, and flexible augmentation of the request flow with filters. It can be used out of the box or extended with custom lookup, filter logic and configuration sources.

"},{"location":"#http-proxy","title":"HTTP Proxy","text":"

Skipper identifies routes based on the requests\u2019 properties, such as path, method, host and headers using the predicates. It allows the modification of the requests and responses with filters that are independently configured for each route. Learn here more about how it works.

"},{"location":"#kubernetes-ingress","title":"Kubernetes Ingress","text":"

Skipper can be used to run as a Kubernetes Ingress controller. Details with examples of Skipper\u2019s capabilities and an overview can be found in the ingress-controller deployment docs.

"},{"location":"data-clients/eskip-file/","title":"Eskip File","text":"

Eskip file dataclient can be used to serve static defined routes, read from an eskip file. The file format eskip shows your route definitions in a clear way:

% cat example.eskip\nhello: Path(\"/hello\") -> \"https://www.example.org\"\n

The Skipper project has two binaries, one is skipper, the other is eskip. Eskip can be used to validate the syntax of your routes file before reloading a production server:

% eskip check example.eskip\n

To run Skipper serving routes from an eskip file you have to use -routes-file <file> parameter:

% skipper -routes-file example.eskip\n

A more complicated example with different routes, matches, predicates and filters shows that you can name your route and use preconditions and create, change, delete HTTP headers as you like:

% cat complicated_example.eskip\nhostHeaderMatch:\n         Host(\"^skipper.teapot.org$\")\n         -> setRequestHeader(\"Authorization\", \"Basic YWRtaW46YWRtaW5zcGFzc3dvcmQK\")\n         -> \"https://target-to.auth-with.basic-auth.enterprise.com\";\nbaiduPathMatch:\n        Path(\"/baidu\")\n        -> setRequestHeader(\"Host\", \"www.baidu.com\")\n        -> setPath(\"/s\")\n        -> setQuery(\"wd\", \"godoc skipper\")\n        -> \"http://www.baidu.com\";\ngoogleWildcardMatch:\n        *\n        -> setPath(\"/search\")\n        -> setQuery(\"q\", \"godoc skipper\")\n        -> \"https://www.google.com\";\nyandexWildacardIfCookie:\n        * && Cookie(\"yandex\", \"true\")\n        -> setPath(\"/search/\")\n        -> setQuery(\"text\", \"godoc skipper\")\n        -> tee(\"http://127.0.0.1:12345/\")\n        -> \"https://yandex.ru\";\n

The former example shows 4 routes: hostHeaderMatch, baiduPathMatch, googleWildcardMatch and yandexWildcardIfCookie.

  • hostHeaderMatch:
    • used if HTTP host header is exactly: \u201cskipper.teapot.org\u201d,
    • sets a Basic Authorization header and
    • sends the modified request to https://target-to.auth-with.basic-auth.enterprise.com
  • baiduPathMatch:
    • used in case the request patch matches /baidu
    • it will set the Host header to the proxy request
    • it will set the path from /baidu to /s
    • it will set the querystring to \u201cws=godoc skipper\u201d and
    • sends the modified request to http://baidu.com
  • googleWildcardMatch:
    • used as default if no other route matches
    • it will set the path to /search
    • it will set the querystring to \u201cq=godoc skipper\u201d and
    • sends the modified request to https://www.google.com
  • yandexWildcardIfCookie:
    • used as default if a Cookie named \u201cyandex\u201d has the value \u201ctrue\u201d
    • it will set the path to /search/
    • it will set the querystring to \u201ctext=godoc skipper\u201d
    • it will send a copy of the modified request to http://127.0.0.1:12345/ (similar to unix tee) and drop the response and
    • sends the modified request to https://yandex.ru

More examples you find in eskip file format description, in filters and in predicates.

Eskip file format is also used if you print your current routes in skipper, for example:

% curl localhost:9911/routes\n*\n  -> setResponseHeader(\"Content-Type\", \"application/json; charset=utf-8\")\n  -> inlineContent(\"{\\\"foo\\\": 3}\")\n  -> <shunt>\n
"},{"location":"data-clients/eskip-remote/","title":"Remote eskip","text":"

Skipper can fetch routes in eskip format over HTTP:

curl https://opensource.zalando.com/skipper/data-clients/example.eskip\nhello: Path(\"/hello\") -> \"https://www.example.org\"\n\nskipper -routes-urls=https://opensource.zalando.com/skipper/data-clients/example.eskip\n\ncurl -s http://localhost:9090/hello | grep title\n    <title>Example Domain</title>\n

You may use multiple urls separated by comma and configure url poll interval via -source-poll-timeout flag.

"},{"location":"data-clients/etcd/","title":"etcd","text":"

etcd is an open-source distributed key value store: https://github.com/etcd-io/etcd. Skipper can use it as a route configuration storage and continuously synchronize the routing from etcd.

"},{"location":"data-clients/etcd/#why-storing-skipper-routes-in-etcd","title":"Why storing Skipper routes in etcd?","text":"

When running multiple Skipper instances, changing the configuration of each instance by accessing the instances directly on the fly can be complex and error-prone. With etcd, we need to update the routes only in etcd and each Skipper instance will synchronize its routing from the new version.

Further benefits of using etcd are improved resiliency and the usage of a standard configuration storage for various system components of a distributed system, not only Skipper.

Note: in case of Kubernetes, the standard recommended way is to use the Kubernetes Ingress API.

"},{"location":"data-clients/etcd/#starting-skipper-with-etcd","title":"Starting Skipper with etcd","text":"

Example:

skipper -etcd-urls http://localhost:2379,http://localhost:4001\n

An additional startup option is the -etcd-prefix. When using multiple Skipper deployments with different purpose, this option allows us to store separate configuration sets for them in the same etcd cluster. Example:

skipper -etcd-urls https://cluster-config -etcd-prefix skipper1\n

Note: when the etcd URL points to an etcd proxy, Skipper will internally use the proxy only to resolve the URLs of the etcd replicas, and access them for the route configuration directly.

"},{"location":"data-clients/etcd/#etcd-version","title":"etcd version","text":"

Skipper currently uses the V2 API of etcd.

"},{"location":"data-clients/etcd/#storage-schema","title":"Storage schema","text":"

Skipper expects to find the route configuration by default at the /v2/keys/skipper/routes path. In this path, the \u2018skipper\u2019 segment can be optionally overridden by the -etcd-prefix startup option.

The /v2/keys/skipper/routes node is a directory that contains the routes as individual child nodes, accessed by the path /v2/keys/skipper/routes/<routeID>. The value of the route nodes is the route expression without the route ID in eskip format.

"},{"location":"data-clients/etcd/#maintaining-route-configuration-in-etcd","title":"Maintaining route configuration in etcd","text":"

etcd (v2) allows generic access to its API via the HTTP protocol. It also provides a supporting client tool: etcdctl. Following the above described schema, both of them can be used to maintain Skipper routes. In addition, Skipper also provides a supporting client tool: eskip, which can provide more convenient access to the routes in etcd.

Getting all routes, a single route, insert or update and delete via HTTP:

curl http://localhost:2379/v2/keys/skipper/routes\ncurl http://localhost:2379/v2/keys/skipper/routes/hello\ncurl -X PUT -d 'value=* -> status(200) -> inlineContent(\"Hello, world!\") -> <shunt>' http://localhost:2379/v2/keys/skipper/routes/hello\ncurl -X DELETE http://localhost:2379/v2/keys/skipper/routes/hello\n

Getting all route IDs, a route expression stored with an ID, insert or update and delete with etcdctl:

etcdctl --endpoints http://localhost:2379,http://localhost:4001 ls /skipper/routes\netcdctl --endpoints http://localhost:2379,http://localhost:4001 get /skipper/routes/hello\netcdctl --endpoints http://localhost:2379,http://localhost:4001 set -- /skipper/routes/hello '* -> status(200) -> inlineContent(\"Hello, world!\") -> <shunt>'\netcdctl --endpoints http://localhost:2379,http://localhost:4001 rm /skipper/routes/bello\n

We use the name \u2018eskip\u2019 for two related concepts: the eskip syntax of route configuration and the eskip command line tool. The command line tool can be used to check the syntax of skipper routes, format route files, prepend or append filters to multiple routes, and also to access etcd.

Getting all routes, a single route, insert or update and delete with eskip:

eskip print -etcd-urls http://localhost:2379,http://localhost:4001\neskip print -etcd-urls http://localhost:2379,http://localhost:4001 | grep hello\neskip upsert -etcd-urls http://localhost:2379,http://localhost:4001 -routes 'hello: * -> status(200) -> inlineContent(\"Hello, world!\") -> <shunt>'\neskip delete -etcd-urls http://localhost:2379,http://localhost:4001 -ids hello\n

When storing multiple configuration sets in etcd, we can use the -etcd-prefix to distinguish between them.

Instead of using routes inline, it may be more convenient to edit them in a file and store them in etcd directly from the file.

Contents of example.eskip:

hello: * -> status(200) -> inlineContent(\"Hello, world!\") -> <shunt>;\nhelloTest: Path(\"/test\") -> status(200) -> inlineContent(\"Hello, test!\") -> <shunt>;\n

Updating those routes in etcd that are defined in the file, or inserting them from the file in case they don\u2019t exist in etcd, yet:

eskip upsert -etcd-urls http://localhost:2379,http://localhost:4001 example.eskip\n

The above command won\u2019t modify or delete those routes, whose ID is missing from example.eskip. To fully sync a set of routes from a file to etcd, use the reset subcommand:

eskip reset -etcd-urls http://localhost:2379,http://localhost:4001 example.eskip\n

For more information see the documentation or eskip -help.

"},{"location":"data-clients/kubernetes/","title":"Kubernetes","text":"

Skipper\u2019s Kubernetes dataclient can be used, if you want to run Skipper as kubernetes-ingress-controller. It will get its route information from provisioned Ingress Objects.

"},{"location":"data-clients/kubernetes/#kubernetes-ingress-controller-deployment","title":"Kubernetes Ingress Controller deployment","text":"

How to install Skipper ingress-controller for cluster operators.

"},{"location":"data-clients/kubernetes/#kubernetes-ingress-usage","title":"Kubernetes Ingress Usage","text":"

Find out more how to use Skipper ingress features for deployers.

"},{"location":"data-clients/kubernetes/#why-to-choose-skipper","title":"Why to choose Skipper?","text":"

Kubernetes is a fast changing environment and traditional http routers are not made for frequently changing routing tables. Skipper is a http proxy made to apply updates very often. Skipper is used in production with more than 200.000 routing table entries. Skipper has Filters to change http data and Predicates to change the matching rules, both can be combined and chained. You can set these in ingress.yaml files to build resiliency patterns like load shedding, ratelimit or circuitbreaker. You can also use them to build more highlevel deployment patterns, for example feature toggles, shadow traffic or blue-green deployments.

Skipper\u2019s main features:

  • Filters - create, update, delete all kind of HTTP data
  • collection of base http manipulations: for example manipulating Path, Querystring, HTTP Headers and redirect handling
  • cookie handling
  • circuitbreakers
  • ratelimit: based on client or backend data
  • Shadow traffic filters
  • Predicates - advanced matching capability
  • URL Path match: Path(\"/foo\")
  • Host header match: Host(\"^www.example.org$\")
  • Querystring: QueryParam(\"featureX\")
  • Cookie based: Cookie(\"alpha\", /^enabled$/)
  • source IP allowlist: Source(\"1.2.3.4/24\") or ClientIP(\"1.2.3.4/24\")
  • time based interval
  • traffic by percentage supports also sticky sessions
  • Kubernetes integration
  • All Filters and Predicates can be used with 2 annotations
    • Predicates: zalando.org/skipper-predicate
    • Filters: zalando.org/skipper-filter
  • Custom routes can be defined with the annotation zalando.org/skipper-routes
  • RouteGroup CRD to support all skipper features without limitation
  • monitoring
  • opentracing
  • access logs with fine granular control of logs by status codes
  • Blue-Green deployments, with another Ingress annotation zalando.org/backend-weights
"},{"location":"data-clients/route-string/","title":"Route String","text":"

Route string dataclient can be used to create simple demo applications, for example if you want to show traffic switching or ratelimiting or just need to serve some json in your demo.

"},{"location":"data-clients/route-string/#serve-text","title":"Serve text","text":"

Serve with Content-Type: text/plain; charset=utf-8

Example (Open your browser http://localhost:9090/):

skipper -inline-routes '* -> inlineContent(\"Hello, world!\") -> <shunt>'\n

Docker Example (Open your browser http://localhost:9090/):

docker run -p 9090:9090 -it registry.opensource.zalan.do/teapot/skipper:latest skipper -inline-routes '* -> inlineContent(\"Hello, world!\") -> <shunt>'\n
"},{"location":"data-clients/route-string/#serve-html-with-css","title":"Serve HTML with CSS","text":"

Serve with Content-Type: text/html; charset=utf-8

Example (Open your browser http://localhost:9090/):

skipper -inline-routes '* -> inlineContent(\"<html><body style=\\\"background-color: orange;\\\"></body></html>\") -> <shunt>'\n

Docker Example (Open your browser http://localhost:9090/):

docker run -p 9090:9090 -it registry.opensource.zalan.do/teapot/skipper:latest skipper -inline-routes '* -> inlineContent(\"<html><body style=\\\"background-color: orange;\\\"></body></html>\") -> <shunt>'\n
"},{"location":"data-clients/route-string/#serve-json","title":"Serve JSON","text":"

Serve with Content-Type: application/json; charset=utf-8

Example (Open your browser http://localhost:9090/):

skipper -inline-routes '* -> inlineContent(\"{\\\"foo\\\": 3}\", \"application/json; charset=utf-8\") -> <shunt>'\n

Docker Example (Open your browser http://localhost:9090/):

docker run -p 9090:9090 -it registry.opensource.zalan.do/teapot/skipper:latest skipper -inline-routes '* -> inlineContent(\"{\\\"foo\\\": 3}\", \"application/json; charset=utf-8\") -> <shunt>'\n
"},{"location":"data-clients/route-string/#proxy-to-a-given-url","title":"Proxy to a given URL","text":"

If you just have to build a workaround and you do not want to use socat to do a tcp proxy, but proxy http, you can do:

% skipper -inline-routes '* -> \"https://my-new-backend.example.org/\"'\n
"},{"location":"kubernetes/east-west-usage/","title":"East-West aka svc-to-svc","text":""},{"location":"kubernetes/east-west-usage/#east-west-usage","title":"East-West Usage","text":"

If you run Skipper with an East-West setup, you can use the configured ingress also to do service-to-service calls, bypassing your ingress loadbalancer and stay inside the cluster. You can connect via HTTP to your application based on its ingress configuration.

Example:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: demo\n  namespace: default\nspec:\n  rules:\n  - host: demo.skipper.cluster.local\n    http:\n      paths:\n      - backend:\n          service:\n            name: example\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n

Or as a RouteGroup:

apiVersion: zalando.org/v1\nkind: RouteGroup\nmetadata:\n  name: demo\n  namespace: default\nspec:\n  hosts:\n  - demo.skipper.cluster.local\n  backends:\n  - name: backend\n    type: service\n    serviceName: example\n    servicePort: 80\n  defaultBackends:\n  - backendName: backend\n

Your clients inside the cluster should call this example with demo.skipper.cluster.local in their host header. Example from inside a container:

curl http://demo.skipper.cluster.local/\n

You can also use the same ingress or RouteGroup object to accept internal and external traffic:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: demo\n  namespace: default\nspec:\n  rules:\n  - host: demo.example.com\n    http:\n      paths:\n      - backend:\n          service:\n            name: example\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n  - host: demo.skipper.cluster.local\n    http:\n      paths:\n      - backend:\n          service:\n            name: example\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n

Or, again, as a RouteGroup:

apiVersion: zalando.org/v1\nkind: RouteGroup\nmetadata:\n  name: demo\n  namespace: default\nspec:\n  hosts:\n  - demo.skipper.cluster.local\n  - demo.example.com\n  backends:\n  - name: backend\n    type: service\n    serviceName: example\n    servicePort: 80\n  defaultBackends:\n  - backendName: backend\n

Metrics will change, because skipper stores metrics per HTTP Host header, which changes with cluster internal calls from demo.example.org to demo.default.skipper.cluster.local.

You can use all features as defined in Ingress Usage, Filters, Predicates via annotations as before and also custom-routes.

"},{"location":"kubernetes/external-addresses/","title":"External Addresses (External Name)","text":"

In Kubernetes, it is possible to define services with external names (type=ExternalName). For ingress objects, Skipper supports these services, and generates routes from the ingress objects that reference one or more external name service, that will have a backend pointing to the network address defined by the specified service.

Route groups don\u2019t support services of type ExternalName, but they support network backends, and even LB backends with explicit endpoints with custom endpoint addresses. This way, it is possible to achieve the same with route groups.

For both the ingress objects and the route groups, the accepted external addresses must be explicitly allowed by listing regexp expressions of which at least one must be matched by the domain name of these addresses. The allow list is a startup option, defined via command line flags or in the configuration file. Enforcing this list happens only in the Kubernetes Ingress mode of Skipper.

"},{"location":"kubernetes/external-addresses/#specifying-allowed-external-names-via-command-line-flags","title":"Specifying allowed external names via command line flags","text":"

For compatibility reasons, the validation needs to be enabled with an explicit toggle:

skipper -kubernetes \\\n-kubernetes-only-allowed-external-names \\\n-kubernetes-allowed-external-name \"^one[.]example[.]org$\" \\\n-kubernetes-allowed-external-name \"^two[.]example[.]org$\"\n
"},{"location":"kubernetes/external-addresses/#specifying-allowed-external-names-via-a-config-file","title":"Specifying allowed external names via a config file","text":"

For compatibility reasons, the validation needs to be enabled with an explicit toggle:

kubernetes-only-allowed-external-names: true\nkubernetes-allowed-external-names:\n- ^one[.]example[.]org$\n- ^two[.]example[.]org$\n
"},{"location":"kubernetes/ingress-backends/","title":"Kubernetes Backend Deployments","text":""},{"location":"kubernetes/ingress-backends/#kubernetes-race-condition-problem","title":"Kubernetes Race Condition problem","text":"

As described in #652, there is a problem that exists in Kubernetes, while terminating Pods. Terminating Pods could be graceful, but the nature of distributed environments will show failures, because not all components in the distributed system changed already their state. When a Pod terminates, the controller-manager has to update the endpoints of the Kubernetes service. Additionally Skipper has to get this endpoints list. Skipper polls the kube-apiserver every -source-poll-timeout=<ms>, which defaults to 3000. Reducing this interval or implementing watch will only reduce the timeframe, but not fix the underlying race condition.

Mitigation strategies can be different and the next section documents strategies for application developers to mitigate the problem.

"},{"location":"kubernetes/ingress-backends/#teardown-strategies","title":"Teardown strategies","text":"

An application that is target of an ingress can circumvent HTTP code 504s Gateway Timeouts with these strategies:

  1. use Pod lifecycle hooks
  2. use a SIGTERM handler to switch readinessProbe to unhealthy and exit later, or just wait for SIGKILL terminating the process.
"},{"location":"kubernetes/ingress-backends/#pod-lifecycle-hooks","title":"Pod Lifecycle Hooks","text":"

Kubernetes Pod Lifecycle Hooks in the Pod spec can have a preStop command which executes for example a binary. The following will execute the binary sleep with argument 20 to wait 20 seconds before terminating the containers within the Pod:

lifecycle:\n  preStop:\n    exec:\n      command: [\"sleep\",\"20\"]\n

20 seconds should be enough to fade your Pod out of the endpoints list and Skipper\u2019s routing table.

"},{"location":"kubernetes/ingress-backends/#sigterm-handling-in-containers","title":"SIGTERM handling in Containers","text":"

An application can implement a SIGTERM handler, that changes the readinessProbe target to unhealthy for the application instance. This will make sure it will be deleted from the endpoints list and from Skipper\u2019s routing table. Similar to Pod Lifecycle Hooks you could sleep 20 seconds and after that terminate your application or you just wait until SIGKILL will cleanup the instance after 60s.

go func() {\n    var sigs chan os.Signal\n    sigs = make(chan os.Signal, 1)\n    signal.Notify(sigs, syscall.SIGTERM)\n    for {\n        select {\n            case <-sigs:\n               healthCheck = unhealthy\n               time.Sleep(20*time.Second)\n               os.Exit(0)\n        }\n    }\n}()\n
"},{"location":"kubernetes/ingress-controller/","title":"Skipper Ingress Controller","text":"

This documentation is meant for cluster operators and describes how to install Skipper as Ingress-Controller in your Kubernetes Cluster.

"},{"location":"kubernetes/ingress-controller/#why-you-should-use-skipper-as-ingress-controller","title":"Why you should use Skipper as ingress controller?","text":"

Baremetal load balancers perform really well, but their configuration is not updated frequently and most of the installations are not meant for rapid change. With the introduction of Kubernetes this assumption is no longer valid and there was a need for a HTTP router which supported backend routes which changed very frequently. Skipper was initially designed for a rapidly changing routing tree and subsequently used to implement an ingress controller in Kubernetes.

Cloud load balancers scale well and can be updated frequently, but do not provide many features. Skipper has advanced resiliency and deployment features, which you can use to enhance your environment. For example, ratelimiters, circuitbreakers, blue-green deployments, shadow traffic and more.

"},{"location":"kubernetes/ingress-controller/#comparison-with-other-ingress-controllers","title":"Comparison with other Ingress Controllers","text":"

At Zalando we chose to run kube-ingress-aws-controller with skipper ingress as the target group. While AWS load balancers give us features like TLS termination, automated certificate rotation, possible WAF, and Security Groups, the HTTP routing capabilities are very limited. Skipper\u2019s main advantage compared to other HTTP routers is matching and changing HTTP. Another advantage for us and for skipper users in general is that defaults with kube-ingress-aws-controller just work as you would expect. For lower latency, safety, and cost reasons you can also use Network Load Balancer (NLB) instead of Application Load Balancer (ALB). We tested two cases (Skipper backends were pre-scaled and not changed):

  1. A hard switch to a cold NLB with 1 million requests per second (RPS). A similar test with 100k RPS with ALB results in client visible error rates and high latency percentiles.
  2. A 6h test with 2k RPS showed regular spikes in p999 latency to more than 100ms in for ALB. NLB showed a flat p999 latency of 25-35ms for the same workload.

There are a number of other ingress controllers including traefik, nginx, haproxy or aws-alb-ingress-controller. Why not one of these?

HAproxy and Nginx are well understood and good TCP/HTTP proxies, that were built before Kubernetes. As a result, the first drawback is their reliance on static configuration files which comes from a time when routes and their configurations were relatively static. Secondly, the list of annotations to implement even basic features is already quite long for users. Skipper was built to support dynamically changing route configurations, which happens quite often in Kubernetes. Another advantage of using Skipper is that we are able to easily implement automated canary deployments, automated blue-green deployments or shadow traffic.

However there are some features that have better support in aws-alb-ingress-controller, HAproxy and nginx. For instance the sendfile() operation. If you need to stream a large file or large amount of files, then you may want to go for one of these options.

aws-alb-ingress-controller directly routes traffic to your Kubernetes services, which is both good and bad, because it can reduce latency, but comes with the risk of depending on kube-proxy routing. kube-proxy routing can take up to 30 seconds, ETCD ttl, for finding pods from dead nodes. In Skipper we passively observe errors from endpoints and are able to drop these from the load balancer members. We add these to an actively checked member pool, which will enable endpoints if these are healthy again from skipper\u2019s point of view. Additionally the aws-alb-ingress-controller does not support features like ALB sharing, or Server Name Indication which can reduce costs. Features like path rewriting are also not currently supported.

Traefik has a good community and support for Kubernetes. Skipper originates from Project Mosaic which was started in 2015. Back then Traefik was not yet a mature project and still had time to go before the v1.0.0 release. Traefik also does not currently support our OpenTracing provider. It also did not support traffic splitting when we started stackset-controller for automated traffic switching. We have also recently done significant work on running Skipper as API gateway within Kubernetes, which could potentially help many teams that run many small services on Kubernetes. Skipper predicates and filters are a powerful abstraction which can enhance the system easily.

"},{"location":"kubernetes/ingress-controller/#comparison-with-service-mesh","title":"Comparison with service mesh","text":"

Why run Skipper and not Istio, Linkerd or other service-mesh solutions?

Skipper has a Kubernetes native integration, which is reliable, proven in production since end of 2015 as of March 2019 run in 112 Kubernetes clusters at Zalando. Skipper already has most of the features provided by service meshes:

  • Authentication/Authorization in Kubernetes ingress, and can also integrate a custom service with webhook
  • Diagnosis tools that support latency, bandwidth throttling, random content and more.
  • Rich Metrics which you can enable and disable in the Prometheus format.
  • Support for different Opentracing providers including jaeger, lightstep and instana
  • Ratelimits support with cluster ratelimits as a pending solution, which enables you to stop login attacks easily
  • Connects to endpoints directly, instead of using Kubernetes services
  • Retries requests, if the request can be safely retried, which is only the case if the error happens on the TCP/IP connection establishment or a backend whose requests are defined as idempotent.
  • Simple East-West Communication which enables proper communication paths without the need of yet another tool to do service discovery. See how to run skipper as API Gateway with East-West setup, if you want to run this powerful setup. Kubernetes, Skipper and DNS are the service discovery in this case.
  • Blue-green deployments with automation if you like to use stackset-controller
  • shadow-traffic to determine if the new version is able to handle the traffic the same as the old one
  • A simple way to do A/B tests
  • You are free to use cloud providers TLS terminations and certificate rotation, which is reliable and secure. Employees cannot download private keys and certificates are certified by a public CA. Many mTLS setups rely on insecure CA handling and are hard to debug in case of failure.
  • We are happy to receive issues and pull requests in our repository, but if you need a feature which can not be implemented upstream, you are also free to use skipper as a library and create internal features to do whatever you want.

With Skipper you do not need to choose to go all-in and you are able to add features as soon as you need or are comfortable.

"},{"location":"kubernetes/ingress-controller/#what-is-an-ingress-controller","title":"What is an Ingress-Controller?","text":"

Ingress-controllers are serving http requests into a Kubernetes cluster. Most of the time traffic will pass through ingress and go to the Kubernetes endpoints of the respective pods. For having a successful ingress, you need to have a DNS name pointing to a set of stable IP addresses that act as a load balancer.

Skipper as ingress-controller:

  • cloud: deploy behind the cloud load balancer
  • baremetal: deploy behind your hardware/software load balancer and have all skipper as members in one pool.

You would point your DNS entries to the load balancer in front of skipper, for example automated using external-dns.

"},{"location":"kubernetes/ingress-controller/#why-skipper-uses-endpointslices-or-endpoints-and-not-services","title":"Why Skipper uses EndpointSlices or Endpoints and not Services?","text":"

Skipper does not use the ClusterIP of Kubernetes Services to route traffic to the pods. Instead it uses the Endpointslices or Endpoints API to bypass kube-proxy created iptables to remove overhead like conntrack entries for iptables DNAT. Skipper can also reuse connections to Pods, such that you have no overhead in establishing connections all the time. To prevent errors on node failures, Skipper also does automatic retries to another endpoint in case it gets a connection refused or TLS handshake error to the endpoint. Other reasons are future support of features like session affinity, different load balancer algorithms or distributed loadbalancing also known as service mesh.

"},{"location":"kubernetes/ingress-controller/#using-endpointslices-instead-of-endpoints","title":"Using EndpointSlices instead of Endpoints","text":"

EndpointSlices provide the ability to scale beyond 1000 load balancer members in one pool.

To enable EndpointSlices you need to run skipper or routesrv with -enable-kubernetes-endpointslices=true.

"},{"location":"kubernetes/ingress-controller/#using-services-instead-of-endpoints","title":"Using Services instead of Endpoints","text":"

While using Endpoints is the preferred way of using Skipper as an ingress controller as described in the section above, there might be edge cases that require the use of Kubernetes Services instead.

An example of scenario where you might need to use Services is when you rely on Istio networking features to connect multiple clusters, as the IPs of Kubernetes Endpoints will not resolve in all cases.

If you find yourself in this category, you can override the default behaviour by setting the KubernetesForceService flag to true in the Skipper.Options struct. This will cause Skipper to create routes with BackendType=eskip.NetworkBackend instead of BackendType=eskip.LBBackend and use the following address format: http://<service name>.<namespace>.svc.cluster.local:<port>. See the Kubernetes Service DNS documentation for more information.

"},{"location":"kubernetes/ingress-controller/#aws-deployment","title":"AWS deployment","text":"

In AWS, this could be an ALB with DNS pointing to the ALB. The ALB can then point to an ingress-controller running on an EC2 node and uses Kubernetes hostnetwork port specification in the Pod spec.

A logical overview of the traffic flow in AWS is shown in this picture:

We described that Skipper bypasses Kubernetes Service and uses directly endpoints for good reasons, therefore the real traffic flow is shown in the next picture.

"},{"location":"kubernetes/ingress-controller/#baremetal-deployment","title":"Baremetal deployment","text":"

In datacenter, baremetal environments, you probably have a hardware load balancer or some haproxy or nginx setup, that serves most of your production traffic and DNS points to these endpoints. For example *.ingress.example.com could point to your virtual server IPs in front of ingress. Skippers could be used as pool members, which do the http routing. Your load balancer of choice could have a wildcard certificate for *.ingress.example.com and DNS for this would point to your load balancer. You can also automate DNS records with external-dns, if you for example use PowerDNS as provider and have a load balancer controller that modifies the status field in ingress to your load balancer virtual IP.

"},{"location":"kubernetes/ingress-controller/#routesrv","title":"RouteSRV","text":"

In Kubernetes, skipper-ingress fetches ingress/routegroup configurations every 3s. With a high number of skipper pods (~100) we faced issues with kube-apiserver, so we introduced RouteSRV, which serves as a layer between kube-apiserver and skipper ingress. This gives us more flexibility in scaling skipper-ingress without affecting kube-apiserver

"},{"location":"kubernetes/ingress-controller/#kubernetes-dataclient-as-routes-source","title":"Kubernetes dataclient as routes source","text":"
  graph TD;\n      kapis(kubeapiserver) --fetches ingresses--> s(skipper);
"},{"location":"kubernetes/ingress-controller/#kubernetes-with-routesrv-as-routes-source","title":"Kubernetes with RouteSRV as routes source","text":"
  graph TD;\n  kapis(kubeapiserver) --fetches ingresses--> s(routesrv) --fetches routes--> d1(skipper1) & d2(skipper2);
"},{"location":"kubernetes/ingress-controller/#requirements","title":"Requirements","text":"

In general for one endpoint you need, a DNS A/AAAA record pointing to one or more load balancer IPs. Skipper is best used behind this layer 4 load balancer to route and manipulate HTTP data.

minimal example:

  • layer 4 load balancer has 1.2.3.4:80 as socket for a virtual server pointing to all skipper ingress
  • *.ingress.example.com points to 1.2.3.4
  • ingress object with host entry for myapp.ingress.example.com targets a service type ClusterIP
  • service type ClusterIP has a selector that targets your Pods of your myapp deployment

TLS example:

  • same as before, but you would terminate TLS on your layer 4 load balancer
  • layer 4 load balancer has 1.2.3.4:443 as socket for a virtual server
  • you can use an automated redirect for all port 80 requests to https with -kubernetes-https-redirect and change the default redirect code with -kubernetes-https-redirect-code
"},{"location":"kubernetes/ingress-controller/#install-skipper-as-ingress-controller","title":"Install Skipper as ingress-controller","text":"

You should have a base understanding of Kubernetes and Ingress.

Prerequisites:

  1. You should checkout the git repository to have access to the manifests: git clone https://github.com/zalando/skipper.git
  2. You should enter the cloned directory: cd skipper
  3. You have to choose how to install skipper-ingress. You can install it as daemonset or as deployment.

Beware, in order to get traffic from the internet, we would need to have a load balancer in front to direct all traffic to skipper. Skipper will route the traffic based on ingress objects. The load balancer should have a HTTP health check, that does a GET request to /kube-system/healthz on all Kubernetes worker nodes. This method is simple and used successfully in production. In AWS you can run kube-ingress-aws-controller to create these load balancers automatically based on the ingress definition.

"},{"location":"kubernetes/ingress-controller/#deployment-style","title":"Deployment style","text":"

Follow the deployment style you like: daemonset or deployment.

"},{"location":"kubernetes/ingress-controller/#daemonset","title":"Daemonset","text":"

We start to deploy skipper-ingress as a daemonset, use hostNetwork and expose the TCP port 9999 on each Kubernetes worker node for incoming ingress traffic.

To deploy all manifests required for the daemonset style, you can run:

kubectl create -f docs/kubernetes/deploy/daemonset\n
# cat docs/kubernetes/deploy/daemonset/daemonset.yaml\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: skipper-ingress\n  namespace: kube-system\n  labels:\n    application: skipper-ingress\n    version: v0.10.180\n    component: ingress\nspec:\n  selector:\n    matchLabels:\n      application: skipper-ingress\n  updateStrategy:\n    type: RollingUpdate\n  template:\n    metadata:\n      name: skipper-ingress\n      labels:\n        application: skipper-ingress\n        version: v0.11.1\n        component: ingress\n    spec:\n      priorityClassName: system-node-critical\n      serviceAccountName: skipper-ingress\n      tolerations:\n      - key: dedicated\n        operator: Exists\n      nodeSelector:\n        kubernetes.io/role: worker\n      hostNetwork: true\n      containers:\n      - name: skipper-ingress\n        image: registry.opensource.zalan.do/teapot/skipper:v0.12.0\n        ports:\n        - name: ingress-port\n          containerPort: 9999\n          hostPort: 9999\n        - name: metrics-port\n          containerPort: 9911\n        args:\n          - \"skipper\"\n          - \"-kubernetes\"\n          - \"-kubernetes-in-cluster\"\n          - \"-kubernetes-path-mode=path-prefix\"\n          - \"-address=:9999\"\n          - \"-wait-first-route-load\"\n          - \"-proxy-preserve-host\"\n          - \"-serve-host-metrics\"\n          - \"-enable-ratelimits\"\n          - \"-experimental-upgrade\"\n          - \"-metrics-exp-decay-sample\"\n          - \"-reverse-source-predicate\"\n          - \"-lb-healthcheck-interval=3s\"\n          - \"-metrics-flavour=codahale,prometheus\"\n          - \"-enable-connection-metrics\"\n          - \"-max-audit-body=0\"\n          - \"-histogram-metric-buckets=.01,.025,.05,.075,.1,.2,.3,.4,.5,.75,1,2,3,4,5,7,10,15,20,30,60,120,300,600\"\n        resources:\n          requests:\n            cpu: 150m\n            memory: 150Mi\n        readinessProbe:\n          httpGet:\n            path: 
/kube-system/healthz\n            port: 9999\n          initialDelaySeconds: 5\n          timeoutSeconds: 5\n        securityContext:\n          readOnlyRootFilesystem: true\n          runAsNonRoot: true\n          runAsUser: 1000\n

Please check, that you are using the latest release, and do not use latest tag in production. While skipper is quite stable as library and proxy, there is ongoing development to make skipper more safe, increase visibility, fix issues that lead to incidents and add features.

"},{"location":"kubernetes/ingress-controller/#deployment","title":"Deployment","text":"

We start to deploy skipper-ingress as a deployment with an HPA, use hostNetwork and expose the TCP port 9999 on each Kubernetes worker node for incoming ingress traffic.

To deploy all manifests required for the deployment style, you can run:

kubectl create -f docs/kubernetes/deploy/deployment\n

Now, let\u2019s see what we have just deployed. This will create serviceaccount, PodSecurityPolicy and RBAC rules such that skipper-ingress is allowed to listen on the hostnetwork and poll ingress resources.

# cat docs/kubernetes/deploy/deployment/rbac.yaml\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n  name: hostnetwork\nspec:\n  hostNetwork: true\n  hostPorts:\n  - max: 10000\n    min: 50\n  supplementalGroups:\n    rule: RunAsAny\n  fsGroup:\n    rule: RunAsAny\n  runAsUser:\n    # Require the container to run without root privileges.\n    rule: 'MustRunAsNonRoot'\n  seLinux:\n    rule: RunAsAny\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: hostnetwork-psp\nrules:\n- apiGroups:\n  - extensions\n  resourceNames:\n  - hostnetwork\n  resources:\n  - podsecuritypolicies\n  verbs:\n  - use\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: skipper-ingress\n  namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: skipper-ingress\nrules:\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  verbs:\n  - get\n  - list\n- apiGroups:\n    - extensions\n  resources:\n    - ingresses\n  verbs:\n    - get\n    - list\n- apiGroups: [\"\"]\n  resources:\n    - namespaces\n    - services\n    - endpoints\n    - pods\n  verbs:\n    - get\n    - list\n- apiGroups:\n    - discovery.k8s.io\n  resources:\n    - endpointslices\n  verbs:\n    - get\n    - list\n- apiGroups:\n  - zalando.org\n  resources:\n  - routegroups\n  verbs:\n  - get\n  - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: skipper-ingress\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: skipper-ingress\nsubjects:\n- kind: ServiceAccount\n  name: skipper-ingress\n  namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: skipper-ingress-hostnetwork-psp\n  namespace: kube-system\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: hostnetwork-psp\nsubjects:\n- kind: ServiceAccount\n  name: skipper-ingress\n  namespace: 
kube-system\n

The next file creates deployment with all options passed to skipper, that you should care in a basic production setup.

# cat docs/kubernetes/deploy/deployment/deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: skipper-ingress\n  namespace: kube-system\n  labels:\n    application: skipper-ingress\n    version: v0.11.40\n    component: ingress\nspec:\n  strategy:\n    rollingUpdate:\n      maxSurge: 0\n  selector:\n    matchLabels:\n      application: skipper-ingress\n  template:\n    metadata:\n      labels:\n        application: skipper-ingress\n        version: v0.11.40\n        component: ingress\n    spec:\n      affinity:\n        podAntiAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            - labelSelector:\n                matchExpressions:\n                - key: application\n                  operator: In\n                  values:\n                  - skipper-ingress\n              topologyKey: kubernetes.io/hostname\n      priorityClassName: system-cluster-critical\n      serviceAccountName: skipper-ingress\n      nodeSelector:\n        kubernetes.io/role: worker\n      dnsPolicy: ClusterFirstWithHostNet\n      hostNetwork: true\n      containers:\n      - name: skipper-ingress\n        image: registry.opensource.zalan.do/teapot/skipper:v0.12.0\n        ports:\n        - name: ingress-port\n          containerPort: 9999\n          hostPort: 9999\n        args:\n          - \"skipper\"\n          - \"-kubernetes\"\n          - \"-kubernetes-in-cluster\"\n          - \"-kubernetes-path-mode=path-prefix\"\n          - \"-address=:9999\"\n          - \"-wait-first-route-load\"\n          - \"-proxy-preserve-host\"\n          - \"-serve-host-metrics\"\n          - \"-disable-metrics-compat\"\n          - \"-enable-profile\"\n          - \"-enable-ratelimits\"\n          - \"-experimental-upgrade\"\n          - \"-metrics-exp-decay-sample\"\n          - \"-reverse-source-predicate\"\n          - \"-lb-healthcheck-interval=3s\"\n          - \"-metrics-flavour=prometheus\"\n          - \"-enable-connection-metrics\"\n          - 
\"-max-audit-body=0\"\n          - \"-histogram-metric-buckets=.0001,.00025,.0005,.00075,.001,.0025,.005,.0075,.01,.025,.05,.075,.1,.2,.3,.4,.5,.75,1,2,3,4,5,7,10,15,20,30,60,120,300,600\"\n          - \"-expect-continue-timeout-backend=30s\"\n          - \"-keepalive-backend=30s\"\n          - \"-max-idle-connection-backend=0\"\n          - \"-response-header-timeout-backend=1m\"\n          - \"-timeout-backend=1m\"\n          - \"-tls-timeout-backend=1m\"\n          - \"-close-idle-conns-period=20s\"\n          - \"-idle-timeout-server=62s\"\n          - \"-read-timeout-server=5m\"\n          - \"-write-timeout-server=60s\"\n          - '-default-filters-prepend=enableAccessLog(4,5) -> lifo(2000,20000,\"3s\")'\n        resources:\n          limits:\n            cpu: \"4\"\n            memory: \"1Gi\"\n          requests:\n            cpu: \"4\"\n            memory: \"1Gi\"\n        readinessProbe:\n          httpGet:\n            path: /kube-system/healthz\n            port: 9999\n          initialDelaySeconds: 60\n          timeoutSeconds: 5\n        securityContext:\n          readOnlyRootFilesystem: true\n          runAsNonRoot: true\n          runAsUser: 1000\n

This will deploy a HorizontalPodAutoscaler to scale skipper-ingress based on load.

# cat docs/kubernetes/deploy/deployment/hpa.yaml\napiVersion: autoscaling/v2beta1\nkind: HorizontalPodAutoscaler\nmetadata:\n  name: skipper-ingress\n  namespace: kube-system\n  labels:\n    application: skipper-ingress\nspec:\n  scaleTargetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: skipper-ingress\n  minReplicas: 3\n  maxReplicas: 50\n  metrics:\n  - type: Resource\n    resource:\n      name: cpu\n      targetAverageUtilization: 70\n  - type: Resource\n    resource:\n      name: memory\n      targetAverageUtilization: 70\n

The next file will group skipper-ingress with a service, such that internal clients can access skipper via Kubernetes service.

# cat docs/kubernetes/deploy/deployment/service.yaml\nkind: Service\napiVersion: v1\nmetadata:\n  name: skipper-ingress\n  namespace: kube-system\n  labels:\n    application: skipper-ingress\n  annotations:\n    prometheus.io/path: /metrics\n    prometheus.io/port: \"9911\"\n    prometheus.io/scrape: \"true\"\nspec:\n  type: ClusterIP\n  ports:\n    - port: 80\n      targetPort: 9999\n      protocol: TCP\n  selector:\n    application: skipper-ingress\n
"},{"location":"kubernetes/ingress-controller/#test-your-skipper-setup","title":"Test your skipper setup","text":"

We now deploy a simple demo application serving html:

# cat docs/kubernetes/deploy/demo/deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: skipper-demo\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      application: skipper-demo\n  template:\n    metadata:\n      labels:\n        application: skipper-demo\n    spec:\n      containers:\n      - name: skipper-demo\n        image: registry.opensource.zalan.do/teapot/skipper:v0.12.0\n        args:\n          - \"skipper\"\n          - \"-inline-routes\"\n          - \"* -> inlineContent(\\\"<body style='color: white; background-color: green;'><h1>Hello!</h1>\\\") -> <shunt>\"\n        ports:\n        - containerPort: 9090\n

We deploy a service type ClusterIP that we will select from ingress:

# cat docs/kubernetes/deploy/demo/svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n  name: skipper-demo\n  labels:\n    application: skipper-demo\nspec:\n  type: ClusterIP\n  ports:\n    - port: 80\n      protocol: TCP\n      targetPort: 9090\n      name: external\n  selector:\n    application: skipper-demo\n

To deploy the demo application, you have to run:

kubectl create -f docs/kubernetes/deploy/demo/\n

Now we have a skipper-ingress running as daemonset or deployment exposing the TCP port 9999 on each worker nodes, which has a running skipper-ingress instance, a backend application running with 2 replicas that serves some html on TCP port 9090, and we expose a cluster service on TCP port 80. Besides skipper-ingress, deployment and service can not be reached from outside the cluster. Now we expose the application with Ingress to the external network:

# cat demo-ing.yaml\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: skipper-demo\nspec:\n  rules:\n  - host: skipper-demo.<mydomain.org>\n    http:\n      paths:\n      - backend:\n          service:\n            name: skipper-demo\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n

To deploy this ingress, you have to run:

kubectl create -f demo-ing.yaml\n

Skipper will configure itself for the given ingress, such that you can test doing:

curl -v -H\"Host: skipper-demo.<mydomain.org>\" http://<nodeip>:9999/\n

The next question you may ask is: how to expose this to your customers?

The answer depends on your setup and complexity requirements. In the simplest case you could add one A record in your DNS *.<mydomain.org> to your frontend load balancer IP that directs all traffic from *.<mydomain.org> to all Kubernetes worker nodes on TCP port 9999. The load balancer health check should make sure, that only nodes with ready skipper-ingress instances will get traffic.

A more complex setup we use in production and can be done with something that configures your frontend load balancer, for example kube-aws-ingress-controller, and your DNS, external-dns automatically.

"},{"location":"kubernetes/ingress-controller/#multiple-skipper-deployments","title":"Multiple skipper deployments","text":"

If you want to split for example internal and public traffic, it might be a good choice to split your ingress deployments. Skipper has the flag --kubernetes-ingress-class=<regexp> to only select ingress objects that have the annotation kubernetes.io/ingress.class set to something that is matched by <regexp>. Skipper will only create routes for ingress objects with its annotation or ingress objects that do not have this annotation.

The default ingress class is skipper, if not set. You have to create your ingress objects with the annotation kubernetes.io/ingress.class: skipper to make sure only skipper will serve the traffic.

Example ingress:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    kubernetes.io/ingress.class: skipper\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-controller/#scoping-skipper-deployments-to-a-single-namespace","title":"Scoping Skipper Deployments to a Single Namespace","text":"

In some instances you might want skipper to only watch for ingress objects created in a single namespace. This can be achieved by using kubernetes-namespace=<string> where <string> is the Kubernetes namespace. Specifying this option forces Skipper to look at the namespace ingresses endpoint rather than the cluster-wide ingresses endpoint.

By default this value is an empty string (\"\") and will scope the skipper instance to be cluster-wide, watching all Ingress objects across all namespaces.

"},{"location":"kubernetes/ingress-controller/#helm-based-deployment","title":"Helm-based deployment","text":"

Helm calls itself the package manager for Kubernetes and therefore takes care of the deployment of whole applications including resources like services, configurations and so on.

Skipper is also available as community contributed Helm chart in the public quay.io registry. The latest packaged release can be found here. The source code is available at GitHub.

The chart includes resource definitions for the following use cases:

  • RBAC
  • Prometheus-Operator

As this chart is not maintained by the Skipper developers and is still under development only the basic deployment workflow is covered here. Check the GitHub repository for all details.

To be able to deploy the chart you will need the following components:

  • helm CLI (Install guide here)
  • Helm registry plugin (available here)

If your environment is setup correctly you should be able to run helm version --client and helm registry version quay.io and get some information about your tooling without any error.

It is possible to deploy the chart without any further configuration like this:

helm registry upgrade quay.io/baez/skipper -- \\\n    --install \\\n    --wait \\\n    \"your release name e.g. skipper\"\n

The --wait switch can be omitted as it only takes care that Helm is waiting until the chart is completely deployed (meaning all resources are created).

To update the deployment to a newer version the same command can be used.

If you have RBAC enabled in your Kubernetes instance you don\u2019t have to create all the previously described resources on your own but you can let Helm create them by simply adding one more switch:

helm registry upgrade quay.io/baez/skipper -- \\\n    --install \\\n    --wait \\\n    --set rbac.create=true \\\n    \"your release name e.g. skipper\"\n

There are some more options available for customization of the chart. Check the repository if you need more configuration possibilities.

"},{"location":"kubernetes/ingress-controller/#run-as-api-gateway-with-east-west-setup","title":"Run as API Gateway with East-West setup","text":"

East-West means cluster internal service-to-service communication. For this you need to resolve DNS to skipper for one or more additional domains of your choice. When Ingress or RouteGroup objects specify such domains Skipper will add the configured predicates.

"},{"location":"kubernetes/ingress-controller/#skipper","title":"Skipper","text":"

To enable the East-West in skipper, you need to run skipper with the -kubernetes-east-west-range-domains and -kubernetes-east-west-range-predicates configuration flags. Check the East West Range feature. Skipper will analyze all routes from Kubernetes objects, and the identified East-West routes will have the specified predicates appended.

For example, for running skipper with the skipper.cluster.local domain, and setting East-West routes to accept just internal traffic, use the following config:

skipper \\\n  -kubernetes-east-west-range-domains=\"skipper.cluster.local\" \\\n  -kubernetes-east-west-range-predicates='ClientIP(\"10.2.0.0/16\")'\n

It assumes 10.2.0.0/16 is your PODs\u2019 CIDR, you have to change it accordingly to your environment.

You also need to have a Kubernetes service of type ClusterIP and write down its IP (e.g. 10.3.11.28), which you will need in the CoreDNS setup.

"},{"location":"kubernetes/ingress-controller/#coredns","title":"CoreDNS","text":"

You can create the DNS records with the template plugin from CoreDNS.

Corefile example:

.:53 {\n    errors\n    health\n    kubernetes cluster.local in-addr.arpa ip6.arpa {\n        pods insecure\n        upstream\n        fallthrough in-addr.arpa ip6.arpa\n    }\n    template IN A skipper.cluster.local  {\n      match \"^.*[.]skipper[.]cluster[.]local\"\n      answer \"{{ .Name }} 60 IN A 10.3.11.28\"\n      fallthrough\n    }\n    prometheus :9153\n    proxy . /etc/resolv.conf\n    cache 30\n    reload\n}\n

"},{"location":"kubernetes/ingress-controller/#usage","title":"Usage","text":"

If the setup is correct, skipper will protect the following ingress example with the ClientIP predicate:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: demo\n  namespace: default\nspec:\n  rules:\n  - host: demo.skipper.cluster.local\n    http:\n      paths:\n      - backend:\n          service:\n            name: example\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n

Your clients inside the cluster should call this example with demo.skipper.cluster.local in their host header. Example from inside a container:

curl demo.skipper.cluster.local\n

Skipper won\u2019t accept traffic from any IP outside of the configured network CIDR.

Note

Depending on your environment, you might want to allow traffic not just from the PODs\u2019 CIDR, but also from your nodes\u2019 CIDR. When doing so, take care not to allow traffic from your LoadBalancer and, by consequence, external traffic. You can use different combinations of predicates like ClientIP and SourceFromLast to achieve the desired protection.

"},{"location":"kubernetes/ingress-controller/#running-with-cluster-ratelimits","title":"Running with Cluster Ratelimits","text":"

Cluster ratelimits require a communication exchange method to build a skipper swarm to have shared knowledge about the requests passing all skipper instances. To enable this feature you need to add the command line options -enable-swarm and -enable-ratelimits. The rest depends on the implementation, that can be:

  • Redis
  • alpha version: SWIM
"},{"location":"kubernetes/ingress-controller/#redis-based","title":"Redis based","text":"

Additionally you have to add -swarm-redis-urls to skipper args:. For example: -swarm-redis-urls=skipper-redis-0.skipper-redis.kube-system.svc.cluster.local:6379,skipper-redis-1.skipper-redis.kube-system.svc.cluster.local:6379.

Running skipper with hostNetwork in kubernetes will not be able to resolve redis hostnames as shown in the example, if skipper does not have dnsPolicy: ClusterFirstWithHostNet in its Pod spec, see also DNS policy in the official Kubernetes documentation.

This setup is considered experimental and should be carefully tested before running it in production.

Example redis statefulset with headless service:

apiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  labels:\n    application: skipper-redis\n    version: v6.2.4\n  name: skipper-redis\n  namespace: kube-system\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      application: skipper-redis\n  serviceName: skipper-redis\n  template:\n    metadata:\n      labels:\n        application: skipper-redis\n        version: v6.2.4\n    spec:\n      containers:\n      - image: registry.opensource.zalan.do/library/redis-6-alpine:6-alpine-20210712\n        name: skipper-redis\n        ports:\n        - containerPort: 6379\n          protocol: TCP\n        readinessProbe:\n          exec:\n            command:\n            - redis-cli\n            - ping\n          failureThreshold: 3\n          initialDelaySeconds: 10\n          periodSeconds: 60\n          successThreshold: 1\n          timeoutSeconds: 1\n        resources:\n          limits:\n            cpu: 100m\n            memory: 100Mi\n      dnsPolicy: ClusterFirst\n      restartPolicy: Always\n      schedulerName: default-scheduler\n---\napiVersion: v1\nkind: Service\nmetadata:\n  labels:\n    application: skipper-redis\n  name: skipper-redis\n  namespace: kube-system\nspec:\n  clusterIP: None\n  ports:\n  - port: 6379\n    protocol: TCP\n    targetPort: 6379\n  selector:\n    application: skipper-redis\n  type: ClusterIP\n
"},{"location":"kubernetes/ingress-controller/#swim-based","title":"SWIM based","text":"

SWIM is a \u201cScalable Weakly-consistent Infection-style Process Group Membership Protocol\u201d, which is very interesting for example to use for cluster ratelimits. This setup is not considered stable enough to run production, yet.

Additionally you have to add the following command line flags to skipper\u2019s container spec args::

-swarm-port=9990\n-swarm-label-selector-key=application\n-swarm-label-selector-value=skipper-ingress\n-swarm-leave-timeout=5s\n-swarm-max-msg-buffer=4194304\n-swarm-namespace=kube-system\n

and open another port in Kubernetes and your Firewall settings to make the communication work with TCP and UDP to the specified swarm-port:

- containerPort: 9990\n  hostPort: 9990\n  name: swarm-port\n  protocol: TCP\n
"},{"location":"kubernetes/ingress-controller/#upgrades","title":"Upgrades","text":"

Please always read the announcements of the vX.Y.0 release page, because these will document in case we break something in a backwards-incompatible way. Most of the time it will be safe to deploy minor version updates, but it is better to know in advance if something could break.

"},{"location":"kubernetes/ingress-controller/#v0140","title":">=v0.14.0

Kubernetes dataclient removes support for ingress v1beta1. What does it mean for you?

  1. If you run with enabled -kubernetes-ingress-v1, you won\u2019t need to do anything and you can safely delete the flag while updating to >=0.14.0.
  2. If you use skipper as library and pass KubernetesIngressV1: true via kubernetes.Options into kubernetes.New(), then you won\u2019t need to do anything and you can safely delete passing the option while updating to >=0.14.0.
  3. If you use Ingress v1beta1 and run Kubernetes cluster version that does not support ingress v1, then you can\u2019t update skipper to >=0.14.0, before you upgrade your Kubernetes cluster.
  4. If you use Ingress v1beta1 and run Kubernetes cluster version that support ingress v1, then you need to allow skipper to access the new APIs with a changed RBAC. See the guide below.

If you are in case 4., you have to apply a change in your RBAC, please check the diff or the full rendered file.

Diff view (same for deployment and daemonset):

diff --git docs/kubernetes/deploy/deployment/rbac.yaml docs/kubernetes/deploy/deployment/rbac.yaml\nindex 361f3789..c0e448a4 100644\n--- docs/kubernetes/deploy/deployment/rbac.yaml\n+++ docs/kubernetes/deploy/deployment/rbac.yaml\n@@ -37,11 +37,18 @@ metadata:\n   name: skipper-ingress\n   namespace: kube-system\n ---\n-apiVersion: rbac.authorization.k8s.io/v1beta1\n+apiVersion: rbac.authorization.k8s.io/v1\n kind: ClusterRole\n metadata:\n   name: skipper-ingress\n rules:\n+- apiGroups:\n+  - networking.k8s.io\n+  resources:\n+  - ingresses\n+  verbs:\n+  - get\n+  - list\n - apiGroups:\n     - extensions\n   resources:\n@@ -66,7 +73,7 @@ rules:\n   - get\n   - list\n ---\n-apiVersion: rbac.authorization.k8s.io/v1beta1\n+apiVersion: rbac.authorization.k8s.io/v1\n kind: ClusterRoleBinding\n metadata:\n   name: skipper-ingress\n@@ -79,7 +86,7 @@ subjects:\n   name: skipper-ingress\n   namespace: kube-system\n ---\n-apiVersion: rbac.authorization.k8s.io/v1beta1\n+apiVersion: rbac.authorization.k8s.io/v1\n kind: RoleBinding\n metadata:\n   name: skipper-ingress-hostnetwork-psp\n

Full rendered RBAC files (same for deployment and daemonset):

# cat docs/kubernetes/deploy/deployment/rbac.yaml\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n  name: hostnetwork\nspec:\n  hostNetwork: true\n  hostPorts:\n  - max: 10000\n    min: 50\n  supplementalGroups:\n    rule: RunAsAny\n  fsGroup:\n    rule: RunAsAny\n  runAsUser:\n    # Require the container to run without root privileges.\n    rule: 'MustRunAsNonRoot'\n  seLinux:\n    rule: RunAsAny\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: hostnetwork-psp\nrules:\n- apiGroups:\n  - extensions\n  resourceNames:\n  - hostnetwork\n  resources:\n  - podsecuritypolicies\n  verbs:\n  - use\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: skipper-ingress\n  namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: skipper-ingress\nrules:\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  verbs:\n  - get\n  - list\n- apiGroups:\n    - extensions\n  resources:\n    - ingresses\n  verbs:\n    - get\n    - list\n- apiGroups: [\"\"]\n  resources:\n    - namespaces\n    - services\n    - endpoints\n    - pods\n  verbs:\n    - get\n    - list\n- apiGroups:\n    - discovery.k8s.io\n  resources:\n    - endpointslices\n  verbs:\n    - get\n    - list\n- apiGroups:\n  - zalando.org\n  resources:\n  - routegroups\n  verbs:\n  - get\n  - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: skipper-ingress\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: skipper-ingress\nsubjects:\n- kind: ServiceAccount\n  name: skipper-ingress\n  namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: skipper-ingress-hostnetwork-psp\n  namespace: kube-system\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: hostnetwork-psp\nsubjects:\n- kind: ServiceAccount\n  name: skipper-ingress\n  namespace: 
kube-system\n
","text":""},{"location":"kubernetes/ingress-usage/","title":"Skipper Ingress Usage","text":"

This documentation is meant for people deploying to Kubernetes Clusters and describes how to use Ingress and the low level and high level features Skipper provides.

RouteGroups, a relatively new feature, also support each of these features, with an alternative format that supports them in a more native way. The documentation contains a section with mapping Ingress to RouteGroups.

"},{"location":"kubernetes/ingress-usage/#skipper-ingress-annotations","title":"Skipper Ingress Annotations","text":"Annotation example data usage zalando.org/backend-weights {\"my-app-1\": 80, \"my-app-2\": 20} blue-green deployments zalando.org/skipper-filter consecutiveBreaker(15) arbitrary filters zalando.org/skipper-predicate QueryParam(\"version\", \"^alpha$\") arbitrary predicates zalando.org/skipper-routes Method(\"OPTIONS\") -> status(200) -> <shunt> extra custom routes zalando.org/ratelimit ratelimit(50, \"1m\") deprecated, use zalando.org/skipper-filter instead zalando.org/skipper-ingress-redirect \"true\" change the default HTTPS redirect behavior for specific ingresses (true/false) zalando.org/skipper-ingress-redirect-code 301 change the default HTTPS redirect code for specific ingresses zalando.org/skipper-loadbalancer consistentHash defaults to roundRobin, see available choices zalando.org/skipper-backend-protocol fastcgi (experimental) defaults to http, see available choices zalando.org/skipper-ingress-path-mode path-prefix (deprecated) please use Ingress version 1 pathType option, which defaults to ImplementationSpecific and does not change the behavior. Skipper\u2019s path-mode defaults to kubernetes-ingress, see available choices, to change the default use -kubernetes-path-mode."},{"location":"kubernetes/ingress-usage/#supported-service-types","title":"Supported Service types","text":"

Ingress backend definitions are services, which have different service types.

Service type supported workaround ClusterIP yes \u2014 NodePort yes \u2014 ExternalName yes \u2014 LoadBalancer no it should not, because Kubernetes cloud-controller-manager will maintain it"},{"location":"kubernetes/ingress-usage/#http-host-header-routing","title":"HTTP Host header routing","text":"

HTTP host header is defined within the rules host section and this route will match by http Host: app-default.example.org and route to endpoints selected by the Kubernetes service app-svc on port 80.

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n

To have 2 routes with different Host headers serving the same backends, you have to specify 2 entries in the rules section, as Kubernetes defined the ingress spec. This is often used in cases of migrations from one domain to another one or migrations to or from bare metal datacenters to cloud providers or inter cloud or intra cloud providers migrations. Examples are AWS account migration, AWS to GCP migration, GCP to bare metal migration or bare metal to Alibaba Cloud migration.

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n  - host: foo.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-usage/#multiple-ingresses-defining-the-same-route","title":"Multiple Ingresses defining the same route","text":"

Warning

If multiple ingresses define the same host and the same predicates, traffic routing may become non-deterministic.

Consider the following two ingresses which have the same hostname and therefore overlap. In skipper the routing of this is currently undefined as skipper doesn\u2019t pick one over the other, but just creates routes (possibly overlapping) for each of the ingresses.

In this example (taken from the issues we saw in production clusters) one ingress points to a service with no endpoints and the other to a service with endpoints. (Most likely service-x was renamed to service-x-live and the old ingress was forgotten).

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: service-x\nspec:\n  rules:\n  - host: service-x.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: service-x # this service has 0 endpoints\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n

\u200b

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: service-x-live\nspec:\n  rules:\n  - host: service-x.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: service-x-live\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-usage/#ingress-path-handling","title":"Ingress path handling","text":"

Skipper supports all Kubernetes path-types as documented in Kubernetes documentation.

Ingress paths can be interpreted in five different modes:

  1. pathType: Prefix (results in PathSubtree predicate)
  2. pathType: Exact (results in Path predicate)
  3. pathType: ImplementationSpecific
  4. based on the kubernetes ingress specification
  5. as plain regular expression
  6. as a path prefix (same as pathType: Prefix and results in PathSubtree)

The default is 3.1, the kubernetes ingress mode. It can be changed by a startup option to any of the other modes, and the individual ingress rules can also override the default behavior with the zalando.org/skipper-ingress-path-mode annotation. You can also set for each path rule a different Kubernetes pathType like Prefix and Exact.

E.g.:

zalando.org/skipper-ingress-path-mode: path-prefix\n
"},{"location":"kubernetes/ingress-usage/#kubernetes-ingress-specification-base-path","title":"Kubernetes ingress specification base path","text":"

By default, the ingress path mode is set to kubernetes-ingress, which is interpreted as a regular expression with a mandatory leading /, and is automatically prepended by a ^ control character, enforcing that the path has to be at the start of the incoming request path.

"},{"location":"kubernetes/ingress-usage/#plain-regular-expression","title":"Plain regular expression","text":"

When the path mode is set to path-regexp, the ingress path is interpreted similar to the default kubernetes ingress specification way, but is not prepended by the ^ control character.

"},{"location":"kubernetes/ingress-usage/#path-prefix","title":"Path prefix","text":"

When the path mode is set to path-prefix, the ingress path is not a regular expression. As an example, /foo/bar will match /foo/bar or /foo/bar/baz, but won\u2019t match /foo/barooz.

When PathPrefix is used, the path matching becomes deterministic when a request could otherwise match more than one ingress route.

In PathPrefix mode, when a Path or PathSubtree predicate is set in an annotation, the predicate in the annotation takes precedence over the normal ingress path.

"},{"location":"kubernetes/ingress-usage/#filters-and-predicates","title":"Filters and Predicates","text":"
  • Filters can manipulate http data, which is not possible in the ingress spec.
  • Predicates change the route matching, beyond normal ingress definitions

This example shows how to add predicates and filters:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-predicate: predicate1 && predicate2 && .. && predicateN\n    zalando.org/skipper-filter: filter1 -> filter2 -> .. -> filterN\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-usage/#custom-routes","title":"Custom Routes","text":"

Please consider using RouteGroups, instead of custom routes!

Custom routes is a way of extending the default routes configured for an ingress resource.

Sometimes you just want to return a header, redirect or even static html content. You can return from skipper without doing a proxy call to a backend, if you end your filter chain with <shunt>. When you use <shunt>, it is recommended to combine it with the status() filter, so that you do not respond with the default http code, which defaults to 404. To match your custom route with higher priority than your ingress you also have to add another predicate, for example the Method(\u201cGET\u201d) predicate to match the route with higher priority.

Custom routes specified in ingress will always add the Host() predicate to match the host header specified in the ingress rules:. If there is a path: definition in your ingress, then, based on the skipper command line parameter -kubernetes-path-mode, it will set one of these predicates:

  • Path()
  • PathSubtree()
  • PathRegexp()

If you have a path: value defined in your ingress resource, a custom route is not allowed to use Path() nor PathSubtree() predicates. You will get an error in Skipper logs, similar to:

[APP]time=\"2019-01-02T13:30:16Z\" level=error msg=\"Failed to add route having 2 path routes: Path(\\\"/foo/bar\\\") -> inlineContent(\\\"custom route\\\") -> status(200) -> <shunt>\"\n
"},{"location":"kubernetes/ingress-usage/#redirects","title":"Redirects","text":""},{"location":"kubernetes/ingress-usage/#overwrite-the-current-ingress-with-a-redirect","title":"Overwrite the current ingress with a redirect","text":"

Sometimes you want to overwrite the current ingress with a redirect to a nicer downtime page.

The following example shows how to create a temporary redirect with status code 307 to https://outage.example.org. No requests will pass to your backend defined, because the created route from the annotation zalando.org/skipper-routes will get 3 Predicates Host(\"^app-default[.]example[.]org$\") && Path(\"/\") && PathRegexp(\"/\"), instead of the 2 Predicates Host(\"^app-default[.]example[.]org$\") && Path(\"/\"), that will be created for the ingress backend.

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: app\n  namespace: default\n  annotations:\n    zalando.org/skipper-routes: |\n       redirect_app_default: PathRegexp(\"/\") -> redirectTo(307, \"https://outage.example.org/\") -> <shunt>;\nspec:\n  rules:\n  - host: \"app-default.example.org\"\n    http:\n      paths:\n      - path: /\n        pathType: Prefix\n        backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n
"},{"location":"kubernetes/ingress-usage/#redirect-a-specific-path-from-ingress","title":"Redirect a specific path from ingress","text":"

Sometimes you want to have a redirect from http://app-default.example.org/myredirect to https://somewhere.example.org/another/path.

The following example shows how to create a permanent redirect with status code 308 from http://app-default.example.org/myredirect to https://somewhere.example.org/another/path, other paths will not be redirected and passed to the backend selected by serviceName=app-svc and servicePort=80:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: app\n  namespace: default\n  annotations:\n    zalando.org/skipper-routes: |\n       redirect_app_default: PathRegexp(\"/myredirect\") -> redirectTo(308, \"https://somewhere.example.org/another/path\") -> <shunt>;\nspec:\n  rules:\n  - host: \"app-default.example.org\"\n    http:\n      paths:\n      - path: /\n        pathType: Prefix\n        backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n
"},{"location":"kubernetes/ingress-usage/#return-static-content","title":"Return static content","text":"

The following example sets a response header X: bar, a response body <html><body>hello</body></html> and respond from the ingress directly with a HTTP status code 200:

zalando.org/skipper-routes: |\n  Path(\"/\") -> setResponseHeader(\"X\", \"bar\") -> inlineContent(\"<html><body>hello</body></html>\") -> status(200) -> <shunt>\n

Keep in mind that you need a valid backend definition pointing to backends which are available, otherwise Skipper would not accept the entire route definition from the ingress object for safety reasons.

"},{"location":"kubernetes/ingress-usage/#cors-example","title":"CORS example","text":"

This example shows how to add a custom route for handling OPTIONS requests.

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-routes: |\n      Method(\"OPTIONS\") ->\n      setResponseHeader(\"Access-Control-Allow-Origin\", \"*\") ->\n      setResponseHeader(\"Access-Control-Allow-Methods\", \"GET, OPTIONS\") ->\n      setResponseHeader(\"Access-Control-Allow-Headers\", \"Authorization\") ->\n      status(200) -> <shunt>\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n

This will generate a custom route for the ingress which looks like this:

Host(/^app-default[.]example[.]org$/) && Method(\"OPTIONS\") ->\n  setResponseHeader(\"Access-Control-Allow-Origin\", \"*\") ->\n  setResponseHeader(\"Access-Control-Allow-Methods\", \"GET, OPTIONS\") ->\n  setResponseHeader(\"Access-Control-Allow-Headers\", \"Authorization\") ->\n  status(200) -> <shunt>\n
"},{"location":"kubernetes/ingress-usage/#multiple-routes","title":"Multiple routes","text":"

You can also set multiple routes, but you have to set the names of the route as defined in eskip:

zalando.org/skipper-routes: |\n  routename1: Path(\"/\") -> clientRatelimit(2, \"1h\") -> inlineContent(\"A\") -> status(200) -> <shunt>;\n  routename2: Path(\"/foo\") -> clientRatelimit(5, \"1h\") -> inlineContent(\"B\") -> status(200) -> <shunt>;\n

Make sure the ; semicolon is used to terminate the routes, if you use multiple routes definitions.

Disclaimer: This feature works only with having different Path* predicates in ingress, if there are no path rules defined. For example this will not work:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: skipper-ingress\n  annotations:\n    kubernetes.io/ingress.class: skipper\n    zalando.org/skipper-routes: |\n       redirect1: Path(\"/foo/\") -> redirectTo(308, \"/bar/\") -> <shunt>;\nspec:\n  rules:\n  - host: foo.bar\n    http:\n      paths:\n      - path: /something\n        pathType: Prefix\n        backend:\n          service:\n            name: something\n            port:\n              number: 80\n      - path: /else\n        pathType: Prefix\n        backend:\n          service:\n            name: else\n            port:\n              number: 80\n

A possible solution is to use skipper\u2019s RouteGroups.

"},{"location":"kubernetes/ingress-usage/#filters-basic-http-manipulations","title":"Filters - Basic HTTP manipulations","text":"

HTTP manipulations are done by using skipper filters. Changes can be done in the request path, meaning request to your backend or in the response path to the client, which made the request.

The following examples can be used within zalando.org/skipper-filter annotation.

"},{"location":"kubernetes/ingress-usage/#add-a-request-header","title":"Add a request Header","text":"

Add a HTTP header in the request path to your backend.

setRequestHeader(\"X-Foo\", \"bar\")\n
"},{"location":"kubernetes/ingress-usage/#add-a-response-header","title":"Add a response Header","text":"

Add a HTTP header in the response path of your clients.

setResponseHeader(\"X-Foo\", \"bar\")\n
"},{"location":"kubernetes/ingress-usage/#enable-compression","title":"Enable compression","text":"

Compress responses with accepted encoding (more details here).

compress() // compress all valid MIME types\ncompress(\"text/html\") // only compress HTML files\ncompress(11, \"text/html\") // control the level of compression, 1 = fastest, 11 = best compression (fallback to 9 for gzip), 0 = no compression\n
"},{"location":"kubernetes/ingress-usage/#set-the-path","title":"Set the Path","text":"

Change the path in the request path to your backend to /newPath/.

setPath(\"/newPath/\")\n
"},{"location":"kubernetes/ingress-usage/#modify-path","title":"Modify Path","text":"

Modify the path in the request path from /api/foo to your backend to /foo.

modPath(\"^/api/\", \"/\")\n
"},{"location":"kubernetes/ingress-usage/#set-the-querystring","title":"Set the Querystring","text":"

Set the Querystring in the request path to your backend to ?text=godoc%20skipper.

setQuery(\"text\", \"godoc skipper\")\n
"},{"location":"kubernetes/ingress-usage/#redirect","title":"Redirect","text":"

Create a redirect with HTTP code 301 to https://foo.example.org/.

redirectTo(301, \"https://foo.example.org/\")\n
"},{"location":"kubernetes/ingress-usage/#cookies","title":"Cookies","text":"

Set a Cookie in the request path to your backend.

requestCookie(\"test-session\", \"abc\")\n

Set a Cookie in the response path of your clients.

responseCookie(\"test-session\", \"abc\", 31536000)\nresponseCookie(\"test-session\", \"abc\", 31536000, \"change-only\")\n\n// response cookie without HttpOnly:\njsCookie(\"test-session-info\", \"abc-debug\", 31536000, \"change-only\")\n
"},{"location":"kubernetes/ingress-usage/#authorization","title":"Authorization","text":"

Our authentication and authorization tutorial or filter auth godoc shows how to use filters for authorization.

"},{"location":"kubernetes/ingress-usage/#basic-auth","title":"Basic Auth","text":"
% htpasswd -nbm myName myPassword\n\nbasicAuth(\"/path/to/htpasswd\")\nbasicAuth(\"/path/to/htpasswd\", \"My Website\")\n
"},{"location":"kubernetes/ingress-usage/#bearer-token-oauthjwt","title":"Bearer Token (OAuth/JWT)","text":"

OAuth2/JWT tokens can be validated and allowed based on different content of the token. Please check the filter documentation for that:

  • oauthTokeninfoAnyScope
  • oauthTokeninfoAllScope
  • oauthTokeninfoAnyKV
  • oauthTokeninfoAllKV

There are also auth predicates, which will allow you to match a route based on the content of a token:

  • JWTPayloadAnyKV()
  • JWTPayloadAllKV()

These are not validating the tokens, which should be done separately by the filters mentioned above.

"},{"location":"kubernetes/ingress-usage/#diagnosis-throttling-bandwidth-latency","title":"Diagnosis - Throttling Bandwidth - Latency","text":"

For diagnosis purpose there are filters that enable you to throttle the bandwidth or add latency. For the full list of filters see our diag filter godoc page.

bandwidth(30) // incoming in kb/s\nbackendBandwidth(30) // outgoing in kb/s\nbackendLatency(120) // in ms\n

Filter documentation:

  • latency
  • bandwidth
  • chunks
  • backendlatency
  • backendChunks
  • randomcontent
"},{"location":"kubernetes/ingress-usage/#flow-id-to-trace-request-flows","title":"Flow Id to trace request flows","text":"

To trace request flows skipper can generate a unique Flow Id for every HTTP request that it receives. You can then find the trace of the request in all your access logs. Skipper sets the X-Flow-Id header to a unique value. Read more about this in our flowid filter and godoc.

 flowId(\"reuse\")\n
"},{"location":"kubernetes/ingress-usage/#filters-reliability-features","title":"Filters - reliability features","text":"

Filters can modify http requests and responses. There are plenty of things you can do with them.

"},{"location":"kubernetes/ingress-usage/#circuitbreaker","title":"Circuitbreaker","text":""},{"location":"kubernetes/ingress-usage/#consecutive-breaker","title":"Consecutive Breaker","text":"

The consecutiveBreaker filter is a breaker for the ingress route that opens if the backend failures for the route reach a value of N (in this example N=15), where N is a mandatory argument of the filter and there are some more optional arguments documented.

consecutiveBreaker(15)\n

The ingress spec would look like this:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-filter: consecutiveBreaker(15)\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-usage/#rate-breaker","title":"Rate Breaker","text":"

The rateBreaker filter is a breaker for the ingress route that opens if the backend failures for the route reach a value of N within a window of the last M requests, where N (in this example 30) and M (in this example 300) are mandatory arguments of the filter and there are some more optional arguments documented.

rateBreaker(30, 300)\n

The ingress spec would look like this:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-filter: rateBreaker(30, 300)\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-usage/#ratelimits","title":"Ratelimits","text":"

There are two kinds of ratelimits:

  1. Client side ratelimits are used to slow down login enumeration attacks, that targets your login pages. This is a security protection for DDoS or login attacks.
  2. Service or backend side ratelimits are used to protect your services from too much traffic. This can be used in an emergency situation to make sure you calm down ingress traffic or in general if you know how many calls per duration your backend is able to handle.
  3. Cluster ratelimits can be enforced either on client or on service side as described above.

Ratelimits are enforced per route.

More details you will find in ratelimit package and in our ratelimit tutorial.

"},{"location":"kubernetes/ingress-usage/#client-ratelimits","title":"Client Ratelimits","text":"

The example shows 20 calls per hour per client, based on X-Forwarded-For header or IP in case there is no X-Forwarded-For header set, are allowed to each skipper instance for the given ingress.

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-filter: clientRatelimit(20, \"1h\")\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n

If you need to rate limit service to service communication and you use Authorization headers to protect your backend from your clients, then you can pass a third parameter to group clients by \u201cAuthorization Header\u201d:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-filter: clientRatelimit(20, \"1h\", \"auth\")\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-usage/#service-ratelimits","title":"Service Ratelimits","text":"

The example shows 50 calls per minute are allowed to each skipper instance for the given ingress.

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-filter: ratelimit(50, \"1m\")\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-usage/#cluster-ratelimits","title":"Cluster Ratelimits","text":"

Cluster ratelimits are eventual consistent and require the flag -enable-swarm to be set.

"},{"location":"kubernetes/ingress-usage/#service","title":"Service","text":"

The example shows 50 calls per minute are allowed to pass this ingress rule to the backend.

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-filter: clusterRatelimit(\"groupSvcApp\", 50, \"1m\")\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-usage/#client","title":"Client","text":"

The example shows 10 calls per hour are allowed per client, X-Forwarded-For header, to pass this ingress rule to the backend.

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-filter: clusterClientRatelimit(\"groupSvcApp\", 10, \"1h\")\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-usage/#path-ratelimit","title":"Path ratelimit","text":"

To ratelimit a specific path use a second ingress definition like

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: app-default\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: app-login\n  annotations:\n    zalando.org/skipper-predicate: Path(\"/login\")\n    zalando.org/skipper-filter: clusterClientRatelimit(\"login-ratelimit\", 10, \"1h\")\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
or use RouteGroups.

"},{"location":"kubernetes/ingress-usage/#shadow-traffic","title":"Shadow Traffic","text":"

If you want to test a new replacement of a production service with production load, you can copy incoming requests to your new endpoint and ignore the responses from your new backend. This can be done by the tee() and teenf() filters.

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-filter: teenf(\"https://app-new.example.org\")\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-usage/#predicates","title":"Predicates","text":"

Predicates are influencing the route matching, which you might want to carefully test before using it in production. This enables you to do feature toggles or time-based enabling of endpoints.

You can use all kinds of predicates with filters together.

"},{"location":"kubernetes/ingress-usage/#feature-toggle","title":"Feature Toggle","text":"

Feature toggles are often implemented as query string to select a new feature. Normally you would have to implement this in your application, but Skipper can help you with that and you can select routes with an ingress definition.

You create 2 ingresses that matches the same route, here host header match to app-default.example.org and one ingress has a defined query parameter to select the route to the alpha version deployment. If the query string in the URL has version=alpha set, for example https://app-default.example.org/mypath?version=alpha, the service alpha-svc will get the traffic, if not prod-svc.

alpha-svc:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-predicate: QueryParam(\"version\", \"^alpha$\")\n  name: alpha-app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: alpha-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n

prod-svc:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: prod-app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: prod-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-usage/#ip-whitelisting","title":"IP Whitelisting","text":"

This ingress route will only allow traffic from networks 1.2.3.0/24 and 195.168.0.0/17

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-predicate: Source(\"1.2.3.0/24\", \"195.168.0.0/17\")\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-usage/#ab-test","title":"A/B test","text":"

Implementing A/B testing is heavy. Skipper can help you to do that. You need to have a traffic split somewhere and have your customers sticky to either A or B flavor of your application. Most likely people would implement using cookies. Skipper can set a cookie with responseCookie() in a response to the client and the cookie predicate can be used to match the route based on the cookie. Like this you can have sticky sessions to either A or B for your clients. This example shows how to have 10% of the traffic using A and the rest using B.

10% choice of setting the Cookie \u201cflavor\u201d to \u201cA\u201d:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-predicate: Traffic(.1, \"flavor\", \"A\")\n    zalando.org/skipper-filter: responseCookie(\"flavor\", \"A\", 31536000)\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: a-app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n

Rest is setting Cookie \u201cflavor\u201d to \u201cB\u201d:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-filter: responseCookie(\"flavor\", \"B\", 31536000)\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: b-app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n

To be sticky, you have to create 2 ingress with predicate to match routes with the cookie we set before. For \u201cA\u201d this would be:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-predicate: Cookie(\"flavor\", /^A$/)\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: a-app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n

For \u201cB\u201d this would be:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-predicate: Cookie(\"flavor\", /^B$/)\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: b-app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-usage/#blue-green-deployments","title":"Blue-Green deployments","text":"

To do blue-green deployments you have to have control over traffic switching. Skipper gives you the opportunity to set weights to backend services in your ingress specification. zalando.org/backend-weights is a hash map, whose key relates to the serviceName of the backend and the value is the weight of traffic you want to send to the particular backend. It works for more than 2 backends, but for simplicity this example shows 2 backends, which should be the default case for supporting blue-green deployments.

In the following example my-app-1 service will get 80% of the traffic and my-app-2 will get 20% of the traffic:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: my-app\n  labels:\n    application: my-app\n  annotations:\n    zalando.org/backend-weights: |\n      {\"my-app-1\": 80, \"my-app-2\": 20}\nspec:\n  rules:\n  - host: my-app.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: my-app-1\n            port:\n              name: http\n        pathType: Prefix\n        path: /\n      - backend:\n          service:\n            name: my-app-2\n            port:\n              name: http\n        pathType: Prefix\n        path: /\n

For more advanced blue-green deployments, check out our stackset-controller.

"},{"location":"kubernetes/ingress-usage/#chaining-filters-and-predicates","title":"Chaining Filters and Predicates","text":"

You can set multiple filters in a chain similar to the eskip format.

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-predicate: Cookie(\"flavor\", /^B$/) && Source(\"1.2.3.0/24\", \"195.168.0.0/17\")\n    zalando.org/skipper-filter: clientRatelimit(50, \"10m\") -> requestCookie(\"test-session\", \"abc\")\n  name: app\nspec:\n  rules:\n  - host: app-default.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-usage/#controlling-https-redirect","title":"Controlling HTTPS redirect","text":"

Skipper Ingress can provide HTTP->HTTPS redirection. Enabling it and setting the status code used by default can be done with the command line options: -kubernetes-https-redirect and -kubernetes-https-redirect-code. By using annotations, this behavior can be overridden from the individual ingress specs for the scope of routes generated based on these ingresses specs.

Annotations:

  • zalando.org/skipper-ingress-redirect: the possible values are true or false. When the global HTTPS redirect is disabled, the value true enables it for the current ingress. When the global redirect is enabled, the value false disables it for the current ingress.
  • zalando.org/skipper-ingress-redirect-code: the possible values are integers 300 <= x < 400. Sets the redirect status code for the current ingress.

Example:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-ingress-redirect: \"true\"\n    zalando.org/skipper-ingress-redirect-code: \"301\"\n  name: app\nspec:\n  rules:\n  - host: mobile-api.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/ingress-usage/#load-balancer-algorithm","title":"Load Balancer Algorithm","text":"

You can set the loadbalancer algorithm, which is used to find the next endpoint for a given request with the ingress annotation zalando.org/skipper-loadbalancer.

For example, for some workloads you might want to have always the same endpoint for the same client. For this use case there is the consistent hash algorithm, that finds for a client detected by the IP or X-Forwarded-For header, the same backend. If the backend is not available it would switch to another one.

Annotations:

  • zalando.org/skipper-loadbalancer see available choices

Example:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    zalando.org/skipper-loadbalancer: consistentHash\n  name: app\nspec:\n  rules:\n  - host: websocket.example.org\n    http:\n      paths:\n      - backend:\n          service:\n            name: app-svc\n            port:\n              number: 80\n        pathType: ImplementationSpecific\n
"},{"location":"kubernetes/routegroup-crd/","title":"RouteGroup CRD Semantics","text":"

This document contains the semantic definition of the RouteGroup CRD. For more information, see the route group documentation, or see the CRD yaml definition.

"},{"location":"kubernetes/routegroup-crd/#concepts","title":"Concepts","text":""},{"location":"kubernetes/routegroup-crd/#routegroup","title":"RouteGroup","text":"

A RouteGroup represents a grouped routing specification, with one or more backends, typically a Kubernetes service. The Skipper routes yielded by a route group are handled atomically, meaning that if any problem is detected during processing a route group, none of the generated routes from that group will be applied.

"},{"location":"kubernetes/routegroup-crd/#hosts","title":"Hosts","text":"

A list of allowed DNS host names that an incoming HTTP request should match in order to be handled by the route group. Host list is mandatory.

"},{"location":"kubernetes/routegroup-crd/#backend","title":"Backend","text":"

Typically a Kubernetes service, but not necessarily. The routes generated from route groups need to have a backend, therefore at least one backend is mandatory.

"},{"location":"kubernetes/routegroup-crd/#default-backend","title":"Default backend","text":"

A route group can contain multiple routes. If the routes don\u2019t identify the backend, then the default backends are used. There can be multiple default backends, e.g. to support weighted A/B testing.

"},{"location":"kubernetes/routegroup-crd/#route","title":"Route","text":"

Routes describe how a matching HTTP request is handled and where it is forwarded to.

"},{"location":"kubernetes/routegroup-crd/#predicate","title":"Predicate","text":"

A predicate is used during route lookup to identify which route should handle an incoming request. Route group routes provide dedicated fields for the most common predicates like the path or the HTTP method, but in the predicates list field, it is possible to define and configure any predicate supported by Skipper. See the Predicates section of the reference.

"},{"location":"kubernetes/routegroup-crd/#filter","title":"Filter","text":"

A filter is used during handling the request to shape the request flow. In a route group, any filter supported by Skipper is allowed to be used. See the Filters section of the reference.

"},{"location":"kubernetes/routegroup-crd/#routegroup-top-level-object","title":"RouteGroup - top level object","text":"

The route group spec must contain hosts, backends, routes and optional default backends.

apiVersion: zalando.org/v1\nkind: RouteGroup\nspec:\n  hosts:\n  - <string>\n  backends:\n  - <backend>\n  defaultBackends:\n  - <backendRef>\n  routes:\n  - <route>\n
"},{"location":"kubernetes/routegroup-crd/#backend_1","title":"Backend","text":"

The <backend> object defines the type of a backend and the required configuration based on the type. Required fields are the name and the type, while the rest of the fields may be required based on the type.

<backend>\n  name: <string>\n  type: <string>            one of \"service|shunt|loopback|dynamic|lb|network\"\n  address: <string>         optional, required for type=network\n  algorithm: <string>       optional, valid for type=lb|service, values=roundRobin|random|consistentHash|powerOfRandomNChoices\n  endpoints: <stringarray>  optional, required for type=lb\n  serviceName: <string>     optional, required for type=service\n  servicePort: <number>     optional, required for type=service\n

See more about Skipper backends in the backend documentation.

"},{"location":"kubernetes/routegroup-crd/#backend-reference","title":"Backend reference","text":"

The <backendRef> object references a backend that is defined in the route group\u2019s backends field. The name is a required field, while the weight is optional. If no weight is used at all, then the traffic is split evenly between the referenced backends. One or more backend reference may appear on the route group level as a default backend, or in a route.

<backendRef>\n- backendName: <string>\n  weight: <number>          optional\n
"},{"location":"kubernetes/routegroup-crd/#route_1","title":"Route","text":"

The <route> object defines the actual routing setup with custom matching rules (predicates), and request flow shaping with filters.

<route>\n  path: <string>            either path or pathSubtree is allowed\n  pathSubtree: <string>     either path or pathSubtree is allowed\n  pathRegexp: <string>      optional\n  methods: <stringarray>    optional, one of the HTTP methods per entry \"GET|HEAD|PATCH|POST|PUT|DELETE|CONNECT|OPTIONS|TRACE\", defaults to all\n  predicates: <stringarray> optional\n  filters: <stringarray>    optional\n  backends:                 optional, overrides defaults\n  - <backendRef>\n

The path, pathSubtree and pathRegexp fields work the same way as the predicate counterparts on eskip routes. See the reference manual for more details.

The methods field defines which methods an incoming request can have in order to match the route.

The items in the predicates and filter fields take lists of predicates and filters, respectively, defined in their eskip format. Example:

  predicates:\n  - Cookie(\"alpha\", \"enabled\")\n  - Header(\"X-Test\", \"true\")\n  filters:\n  - setQuery(\"test\", \"alpha\")\n  - compress()\n

See also:

  • predicates
  • filters

The references in the backends field, if present, define which backends a route should use."},{"location":"kubernetes/routegroup-validation/","title":"RouteGroup Operations","text":"

RouteGroup is a Custom Resource Definition (CRD).

"},{"location":"kubernetes/routegroup-validation/#routegroup-validation","title":"RouteGroup Validation","text":"

CRDs can be validated at create and update time. The validation can be done via JSON Schemas, which enables input type validation and string validation with regular expressions. In addition to JSON Schema you can use a custom validation webhook.

For RouteGroup we provide a CRD yaml with JSON schema and a validation webhook as separate binary webhook in the same docker container as skipper.

"},{"location":"kubernetes/routegroup-validation/#synopsis","title":"Synopsis","text":"
% docker run registry.opensource.zalan.do/teapot/skipper:latest webhook --help\nusage: webhook [<flags>]\n\nFlags:\n  --help                         Show context-sensitive help (also try --help-long and --help-man).\n  --debug                        Enable debug logging\n  --tls-cert-file=TLS-CERT-FILE  File containing the certificate for HTTPS\n  --tls-key-file=TLS-KEY-FILE    File containing the private key for HTTPS\n  --address=\":9443\"              The address to listen on\n
"},{"location":"kubernetes/routegroup-validation/#validation-webhook-installation","title":"Validation Webhook Installation","text":"

A Kubernetes validation webhook can be installed next to the kubernetes API server. In order to do this you need:

  1. A container running the webhook
  2. A ValidatingWebhookConfiguration configuration

Kubernetes container spec for the RouteGroup validation webhook can be installed in your kube-apiserver Pod, such that it can communicate via localhost.

We use the TLS based ValidatingWebhookConfiguration configuration, that we show below, but you can also scroll down to the Configuration without TLS. The configuration will make sure the validation webhook is called on all create and update operations to zalando.org/v1/routegroups by the Kubernetes API server.

"},{"location":"kubernetes/routegroup-validation/#configuration-with-tls","title":"Configuration with TLS","text":"

Here you can see the Pod spec with enabled TLS:

- name: routegroups-admission-webhook\n  image: registry.opensource.zalan.do/teapot/skipper:v0.13.3\n  args:\n    - webhook\n    - --address=:9085\n    - --tls-cert-file=/etc/kubernetes/ssl/admission-controller.pem\n    - --tls-key-file=/etc/kubernetes/ssl/admission-controller-key.pem\n  lifecycle:\n    preStop:\n      exec:\n        command: [\"/bin/sh\", \"-c\",  \" sleep 60\"]\n  readinessProbe:\n    httpGet:\n      scheme: HTTPS\n      path: /healthz\n      port: 9085\n    initialDelaySeconds: 5\n    timeoutSeconds: 5\n  resources:\n    requests:\n      cpu: 50m\n      memory: 100Mi\n  ports:\n    - containerPort: 9085\n  volumeMounts:\n    - mountPath: /etc/kubernetes/ssl\n      name: ssl-certs-kubernetes\n      readOnly: true\n

Make sure you pass the caBundle and set the url depending where your webhook container is running.

apiVersion: admissionregistration.k8s.io/v1\nkind: ValidatingWebhookConfiguration\nmetadata:\n  name: \"routegroup-admitter.teapot.zalan.do\"\n  labels:\n    application: routegroups-admission-webhook\nwebhooks:\n  - name: \"routegroup-admitter.teapot.zalan.do\"\n    rules:\n      - operations: [\"CREATE\", \"UPDATE\"]\n        apiGroups: [\"zalando.org\"]\n        apiVersions: [\"v1\"]\n        resources: [\"routegroups\"]\n    clientConfig:\n      url: \"https://localhost:9085/routegroups\"\n      caBundle: |\n        ...8<....\n    admissionReviewVersions: [\"v1\"]\n    sideEffects: None\n    timeoutSeconds: 5\n

"},{"location":"kubernetes/routegroup-validation/#configuration-without-tls","title":"Configuration without TLS","text":"

In case you don\u2019t need TLS, you do not need some of the configuration shown above.

Container spec without TLS:

- name: routegroups-admission-webhook\n  image: registry.opensource.zalan.do/teapot/skipper:v0.13.3\n  args:\n    - webhook\n    - --address=:9085\n  lifecycle:\n    preStop:\n      exec:\n        command: [\"/bin/sh\", \"-c\",  \" sleep 60\"]\n  readinessProbe:\n    httpGet:\n      path: /healthz\n      port: 9085\n    initialDelaySeconds: 5\n    timeoutSeconds: 5\n  resources:\n    requests:\n      cpu: 50m\n      memory: 100Mi\n  ports:\n    - containerPort: 9085\n

Validation webhook configuration without TLS:

apiVersion: admissionregistration.k8s.io/v1\nkind: ValidatingWebhookConfiguration\nmetadata:\n  name: \"routegroup-admitter.teapot.zalan.do\"\n  labels:\n    application: routegroups-admission-webhook\nwebhooks:\n  - name: \"routegroup-admitter.teapot.zalan.do\"\n    rules:\n      - operations: [\"CREATE\", \"UPDATE\"]\n        apiGroups: [\"zalando.org\"]\n        apiVersions: [\"v1\"]\n        resources: [\"routegroups\"]\n    clientConfig:\n      url: \"http://localhost:9085/routegroups\"\n    admissionReviewVersions: [\"v1\"]\n    sideEffects: None\n    timeoutSeconds: 5\n
"},{"location":"kubernetes/routegroups/","title":"Route groups","text":"

Route groups are an alternative to the Kubernetes Ingress format for defining ingress rules. They allow to define Skipper routing in Kubernetes, while providing a straightforward way to configure the routing features supported by Skipper and not defined by the generic Ingress.

"},{"location":"kubernetes/routegroups/#skipper-as-kubernetes-ingress-controller","title":"Skipper as Kubernetes Ingress controller","text":"

Skipper is an extensible HTTP router with rich route matching, and request flow and traffic shaping capabilities. Through its integration with Kubernetes, it can be used in the role of an ingress controller for forwarding incoming external requests to the right services in a cluster. Kubernetes provides the Ingress specification to define the rules by which an ingress controller should handle the incoming traffic. The specification is simple and generic, but doesn\u2019t offer a straightforward way to benefit from Skipper\u2019s rich HTTP related functionality.

"},{"location":"kubernetes/routegroups/#routegroups","title":"RouteGroups","text":"

A RouteGroup is a custom Kubernetes resource definition. It provides a way to define the ingress routing for Kubernetes services. It allows route matching based on any HTTP request attributes, and provides a clean way for the request flow augmentation and traffic shaping. It supports higher level features like gradual traffic switching, A/B testing, and more.

Example:

apiVersion: zalando.org/v1\nkind: RouteGroup\nmetadata:\n  name: my-routes\nspec:\n  backends:\n  - name: variant-a\n    type: service\n    serviceName: service-a\n    servicePort: 80\n  - name: variant-b\n    type: service\n    serviceName: service-b\n    servicePort: 80\n  defaultBackends:\n  - backendName: variant-b\n  routes:\n  - pathSubtree: /\n    filters:\n    - responseCookie(\"canary\", \"A\")\n    predicates:\n    - Traffic(.1)\n    backends:\n    - backendName: variant-a\n  - pathSubtree: /\n    filters:\n    - responseCookie(\"canary\", \"B\")\n  - pathSubtree: /\n    predicates:\n    - Cookie(\"canary\", \"A\")\n    backends:\n    - backendName: variant-a\n  - pathSubtree: /\n    predicates:\n    - Cookie(\"canary\", \"B\")\n

(See a more detailed explanation of the above example further down in this document.)

Links:

  • RouteGroup semantics
  • CRD definition
"},{"location":"kubernetes/routegroups/#requirements","title":"Requirements","text":"
  • External DNS v0.7.0 or higher
  • Kubernetes Ingress Controller for AWS v0.10.0 or higher
"},{"location":"kubernetes/routegroups/#installation","title":"Installation","text":"

The definition file of the CRD can be found as part of Skipper\u2019s source code, at:

https://github.com/zalando/skipper/blob/master/dataclients/kubernetes/deploy/apply/routegroups_crd.yaml

To install it manually in a cluster, assuming the current directory is the root of Skipper\u2019s source, call this command:

kubectl apply -f dataclients/kubernetes/deploy/apply/routegroups_crd.yaml\n

This will install a namespaced resource definition, providing the RouteGroup kind:

  • full name: routegroups.zalando.org
  • resource group: zalando.org/v1
  • resource names: routegroup, routegroups, rg, rgs
  • kind: RouteGroup

The route groups, once any is defined, can be displayed then via kubectl as:

kubectl get rgs\n

The API URL of the routegroup resources will be:

https://kubernetes-api-hostname/apis/zalando.org/v1/routegroups

"},{"location":"kubernetes/routegroups/#usage","title":"Usage","text":"

The absolute minimal route group configuration for a Kubernetes service (my-service) looks as follows:

apiVersion: zalando.org/v1\nkind: RouteGroup\nmetadata:\n  name: my-route-group\nspec:\n  backends:\n  - name: my-backend\n    type: service\n    serviceName: my-service\n    servicePort: 80\n  routes:\n    - pathSubtree: /\n      backends:\n        - backendName: my-backend\n

This is equivalent to the ingress:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: my-ingress\nspec:\n  defaultBackend:\n    service:\n      name: my-service\n      port:\n        number: 80\n

Notice that the route group contains a list of actual backends, and the defined service backend is then referenced as the default backend. This structure plays a role in supporting scenarios like A/B testing and gradual traffic switching, explained below. The backend definition also has a type field, whose values can be service, lb, network, shunt, loopback or dynamic. More details on that below.

Creating, updating and deleting route groups happens the same way as with ingress objects. E.g., manually applying a route group definition:

kubectl apply -f my-route-group.yaml\n
"},{"location":"kubernetes/routegroups/#hosts","title":"Hosts","text":"
  • Format

Hosts contain hostnames that are used to match the requests handled by a given route group. They are also used to update the required DNS entries and load balancer configuration if the cluster is set up that way.

Note that it is also possible to use any Skipper predicate in the routes of a route group, with the Host predicate included, but the hostnames defined that way will not serve as input for the DNS configuration.

"},{"location":"kubernetes/routegroups/#backends","title":"Backends","text":"
  • Format
  • General backend reference

RouteGroups support different backends. The most typical backend type is the \u2018service\u2019, and it works the same way as in case of ingress definitions.

In a RouteGroup, there can be multiple backends and they are listed on the top level of the route group spec, and are referenced from the actual routes or as default backends.

"},{"location":"kubernetes/routegroups/#typeservice","title":"type=service","text":"

This backend resolves to a Kubernetes service. It works the same way as in case of Ingress definitions. Skipper resolves the Services to the available Endpoints belonging to the Service, and generates load balanced routes using them. (This basically means that under the hood, a service backend becomes an lb backend.)

"},{"location":"kubernetes/routegroups/#typelb","title":"type=lb","text":"

This backend provides load balancing between multiple network endpoints. Keep in mind that the service type backend automatically generates load balanced routes for the service endpoints, so this backend type typically doesn\u2019t need to be used for services.

"},{"location":"kubernetes/routegroups/#typenetwork","title":"type=network","text":"

This backend type results in routes that proxy incoming requests to the defined network address, regardless of the Kubernetes semantics, and allows URLs that point somewhere else, potentially outside of the cluster, too.

"},{"location":"kubernetes/routegroups/#typeshunt-typeloopback-typedynamic","title":"type=shunt, type=loopback, type=dynamic","text":"

These backend types allow advanced routing setups. Please check the reference manual for more details.

"},{"location":"kubernetes/routegroups/#default-backends","title":"Default Backends","text":"
  • Format

A default backend is a reference to one of the defined backends. When a route doesn\u2019t specify which backend(s) to use, the ones referenced in the default backends will be used.

In case there are no individual routes at all in the route group, a default set of routes (one or more) will be generated and will proxy the incoming traffic to the default backends.

The reason, why multiple backends can be referenced as default, is that this makes it easy to execute gradual traffic switching between different versions, even more than two, of the same application. See more.

"},{"location":"kubernetes/routegroups/#routes","title":"Routes","text":"
  • Format

Routes define where to and how the incoming requests will be proxied. The predicates, including the path, pathSubtree, pathRegexp and methods fields, and any free-form predicate listed under the predicates field, control which requests are matched by a route, the filters can apply changes to the forwarded requests and the returned responses, and the backend refs, if defined, override the default backends, where the requests will be proxied to. If a route group doesn\u2019t contain any explicit routes, but it contains default backends, a default set of routes will be generated for the route group.

It is important to bear in mind about the path fields that the plain \u2018path\u2019 means an exact path match, while \u2018pathSubtree\u2019 behaves as a path prefix, and so it is more similar to the path in the Ingress specification.

See also:

  • predicates
  • filters
"},{"location":"kubernetes/routegroups/#gradual-traffic-switching","title":"Gradual traffic switching","text":"

The weighted backend references allow to split the traffic of a single route and send it to different backends with the ratio defined by the weights of the backend references. E.g:

apiVersion: zalando.org/v1\nkind: RouteGroup\nmetadata:\n  name: my-routes\nspec:\n  hosts:\n  - api.example.org\n  backends:\n  - name: api-svc-v1\n    type: service\n    serviceName: api-service-v1\n    servicePort: 80\n  - name: api-svc-v2\n    type: service\n    serviceName: foo-service-v2\n    servicePort: 80\n  routes:\n  - pathSubtree: /api\n    backends:\n    - backendName: api-svc-v1\n      weight: 80\n    - backendName: api-svc-v2\n      weight: 20\n

In case of the above example, 80% of the requests is sent to api-service-v1 and the rest is sent to api-service-v2.

Since this type of weighted traffic switching can be used in combination with the Traffic predicate, it is possible to control the routing of a long running A/B test, while still executing gradual traffic switching independently to deploy a new version of the variants, maybe to deploy a fix only to one variant. E.g:

apiVersion: zalando.org/v1\nkind: RouteGroup\nmetadata:\n  name: my-routes\nspec:\n  hosts:\n  - api.example.org\n  backends:\n  - name: variant-a\n    type: service\n    serviceName: service-a\n    servicePort: 80\n  - name: variant-b\n    type: service\n    serviceName: service-b-v1\n    servicePort: 80\n  - name: variant-b-v2\n    type: service\n    serviceName: service-b-v2\n    servicePort: 80\n  defaultBackends:\n  - backendName: variant-b\n    weight: 80\n  - backendName: variant-b-v2\n    weight: 20\n  routes:\n  - filters:\n    - responseCookie(\"canary\", \"A\")\n    predicates:\n    - Traffic(.1)\n    backends:\n    - backendName: variant-a\n  - filters:\n    - responseCookie(\"canary\", \"B\")\n  - predicates:\n    - Cookie(\"canary\", \"A\")\n    backends:\n    - backendName: variant-a\n  - predicates:\n    - Cookie(\"canary\", \"B\")\n

See also:

  • Traffic predicate
"},{"location":"kubernetes/routegroups/#mapping-from-ingress-to-routegroups","title":"Mapping from Ingress to RouteGroups","text":"

RouteGroups are one-way compatible with Ingress, meaning that every Ingress specification can be expressed in the RouteGroup format, as well. In the following, we describe the mapping from Ingress fields to RouteGroup fields.

"},{"location":"kubernetes/routegroups/#ingress-with-default-backend","title":"Ingress with default backend","text":"

Ingress:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: my-ingress\nspec:\n  defaultBackend:\n    service:\n      name: my-service\n      port:\n        number: 80\n

RouteGroup:

apiVersion: zalando.org/v1\nkind: RouteGroup\nmetadata:\n  name: my-route-group\nspec:\n  backends:\n  - name: my-backend\n    type: service\n    serviceName: my-service\n    servicePort: 80\n  defaultBackends:\n  - backendName: my-backend\n
"},{"location":"kubernetes/routegroups/#ingress-with-path-rule","title":"Ingress with path rule","text":"

Ingress:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: my-ingress\nspec:\n  rules:\n  - host: api.example.org\n    http:\n      paths:\n      - path: /api\n        pathType: Prefix\n        backend:\n          service:\n            name: my-service\n            port:\n              number: 80\n

RouteGroup:

apiVersion: zalando.org/v1\nkind: RouteGroup\nmetadata:\n  name: my-route-group\nspec:\n  hosts:\n  - api.example.org\n  backends:\n  - name: my-backend\n    type: service\n    serviceName: my-service\n    servicePort: 80\n  routes:\n  - pathSubtree: /api\n
"},{"location":"kubernetes/routegroups/#ingress-with-multiple-hosts","title":"Ingress with multiple hosts","text":"

Ingress (we need to define two rules):

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: my-ingress\nspec:\n  rules:\n  - host: api.example.org\n    http:\n      paths:\n      - path: /api\n        pathType: Prefix\n        backend:\n          service:\n            name: my-service\n            port:\n              number: 80\n  - host: legacy-name.example.org\n    http:\n      paths:\n      - path: /api\n        pathType: Prefix\n        backend:\n          service:\n            name: my-service\n            port:\n              number: 80\n

RouteGroup (we just define an additional host):

apiVersion: zalando.org/v1\nkind: RouteGroup\nmetadata:\n  name: my-route-group\nspec:\n  hosts:\n  - api.example.org\n  - legacy-name.example.org\n  backends:\n  - name: my-backend\n    type: service\n    serviceName: my-service\n    servicePort: 80\n  routes:\n  - pathSubtree: /api\n
"},{"location":"kubernetes/routegroups/#ingress-with-multiple-hosts-and-different-routing","title":"Ingress with multiple hosts, and different routing","text":"

For those cases when using multiple hostnames in the same ingress with different rules, we need to apply a small workaround for the equivalent route group spec. Ingress:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: my-ingress\nspec:\n  rules:\n  - host: api.example.org\n    http:\n      paths:\n      - path: /api\n        pathType: Prefix\n        backend:\n          service:\n            name: my-service\n            port:\n              number: 80\n  - host: legacy-name.example.org\n    http:\n      paths:\n      - path: /application\n        pathType: Prefix\n        backend:\n          service:\n            name: my-service\n            port:\n              number: 80\n

RouteGroup (we need to use additional host predicates):

apiVersion: zalando.org/v1\nkind: RouteGroup\nmetadata:\n  name: my-route-group\nspec:\n  hosts:\n  - api.example.org\n  - legacy-name.example.org\n  backends:\n  - name: my-backend\n    type: service\n    serviceName: my-service\n    servicePort: 80\n  routes:\n  - pathSubtree: /api\n    predicates:\n    - Host(\"api.example.org\")\n  - pathSubtree: /application\n    predicates:\n    - Host(\"legacy-name.example.org\")\n

The RouteGroups allow multiple hostnames for each route group, but by default, their union is used during routing. If we want to distinguish between them, then we need to use an additional Host predicate in the routes. Importantly, only the hostnames listed under the hosts field serve as input for the DNS and LB configuration.

"},{"location":"kubernetes/routegroups/#mapping-skipper-ingress-extensions-to-routegroups","title":"Mapping Skipper Ingress extensions to RouteGroups","text":"

Skipper accepts a set of annotations in Ingress objects that give access to certain Skipper features that would not be possible with the native fields of the Ingress spec, e.g. improved path handling or rate limiting. These annotations can be expressed now natively in the RouteGroups.

"},{"location":"kubernetes/routegroups/#zalandoorgbackend-weights","title":"zalando.org/backend-weights","text":"

Backend weights are now part of the backend references, and they can be controlled for multiple backend sets within the same route group. See Gradual traffic switching.

"},{"location":"kubernetes/routegroups/#zalandoorgskipper-filter-and-zalandoorgskipper-predicate","title":"zalando.org/skipper-filter and zalando.org/skipper-predicate","text":"

Filters and predicates are now part of the route objects, and different set of filters or predicates can be set for different routes.

"},{"location":"kubernetes/routegroups/#zalandoorgskipper-routes","title":"zalando.org/skipper-routes","text":"

\u201cCustom routes\u201d in a route group are unnecessary, because every route can be configured with predicates, filters and backends without limitations. E.g where an ingress annotation\u2019s metadata may look like this:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: my-ingress\n  zalando.org/skipper-routes: |\n    Method(\"OPTIONS\") -> status(200) -> <shunt>\nspec:\n  backend:\n    service:\n      name: my-service\n      port:\n        number: 80\n

the equivalent RouteGroup would look like this:

apiVersion: zalando.org/v1\nkind: RouteGroup\nmetadata:\n  name: my-route-group\nspec:\n  backends:\n  - name: my-backend\n    type: service\n    serviceName: my-service\n    servicePort: 80\n  - name: options200\n    type: shunt\n  defaultBackends:\n  - backendName: my-backend\n  routes:\n  - pathSubtree: /\n  - pathSubtree: /\n    methods: OPTIONS\n    filters:\n    - status(200)\n    backends:\n    - backendName: options200\n
"},{"location":"kubernetes/routegroups/#zalandoorgratelimit","title":"zalando.org/ratelimit","text":"

The ratelimiting can be defined on the route level among the filters, in the same format as in this annotation.

"},{"location":"kubernetes/routegroups/#zalandoorgskipper-ingress-redirect-and-zalandoorgskipper-ingress-redirect-code","title":"zalando.org/skipper-ingress-redirect and zalando.org/skipper-ingress-redirect-code","text":"

Skipper ingress provides global HTTPS redirect, but it allows individual ingresses to override the global settings: enabling/disabling it and changing the default redirect code. With route groups, this override can be achieved by simply defining an additional route, with the same matching rules, and therefore the override can be controlled eventually on a route basis. E.g:

apiVersion: zalando.org/v1\nkind: RouteGroup\nmetadata:\n  name: my-route-group\nspec:\n  backends:\n  - name: my-backend\n    type: service\n    serviceName: my-service\n    servicePort: 80\n  - name: redirectShunt\n    type: shunt\n  defaultBackends:\n  - backendName: my-backend\n  routes:\n  - pathSubtree: /\n  - pathSubtree: /\n    predicates:\n    - Header(\"X-Forwarded-Proto\", \"http\")\n    filters:\n    - redirectTo(302, \"https:\")\n    backends:\n    - backendName: redirectShunt\n
"},{"location":"kubernetes/routegroups/#zalandoorgskipper-loadbalancer","title":"zalando.org/skipper-loadbalancer","text":"

Skipper Ingress doesn\u2019t use the ClusterIP of the Service for forwarding the traffic to, but sends it directly to the Endpoints represented by the Service, and balances the load between them with the round-robin algorithm. The algorithm choice can be overridden by this annotation. In case of the RouteGroups, the algorithm is simply an attribute of the backend definition, and it can be set individually for each backend. E.g:

  backends:\n  - name: my-backend\n    type: service\n    serviceName: my-service\n    servicePort: 80\n    algorithm: consistentHash\n

See also:

  • Load Balancer backend
"},{"location":"kubernetes/routegroups/#zalandoorgskipper-ingress-path-mode","title":"zalando.org/skipper-ingress-path-mode","text":"

The route objects support the different path lookup modes, by using the path, pathSubtree or the pathRegexp field. See also the route matching explained for the internals. The mapping is as follows:

Ingress pathType: RouteGroup: Exact and /foo path: /foo Prefix and /foo pathSubtree: /foo Ingress (pathType: ImplementationSpecific): RouteGroup: kubernetes-ingress and /foo pathRegexp: ^/foo path-regexp and /foo pathRegexp: /foo path-prefix and /foo pathSubtree: /foo kubernetes-ingress and /foo$ path: /foo"},{"location":"kubernetes/routegroups/#multiple-skipper-deployments","title":"Multiple skipper deployments","text":"

If you want to split, for example, internal and public traffic, it might be a good choice to split your RouteGroups. Skipper has the flag --kubernetes-routegroup-class=<string> to only select RouteGroup objects that have the annotation zalando.org/routegroup.class set to <string>. Skipper will only create routes for RouteGroup objects with its annotation or RouteGroup objects that do not have this annotation. The default class is skipper, if not set.

Example RouteGroup:

apiVersion: zalando.org/v1\nkind: RouteGroup\nmetadata:\n  name: my-route-group\n  annotations:\n    zalando.org/routegroup.class: internal\nspec:\n  backends:\n  - name: my-backend\n    type: service\n    serviceName: my-service\n    servicePort: 80\n  defaultBackends:\n  - backendName: my-service\n
"},{"location":"operation/deployment/","title":"Deployments and Data-Clients","text":""},{"location":"operation/deployment/#edge-http-routing","title":"Edge HTTP Routing","text":"

Edge HTTP routing is the first hit to your production HTTP loadbalancer. Skipper can serve this well and reliably in production since 2016.

On the edge you want to dispatch incoming HTTP requests to your backends, which could be a microservice architecture.

In this deployment mode you might have 100k HTTP routes, which are used in production and modified by many parties.

To support this scenario we have the etcd dataclient.

Etcd is a distributed database.

TODO: why we use ETCD for this purpose

"},{"location":"operation/deployment/#kubernetes-ingress","title":"Kubernetes Ingress","text":"

Kubernetes Ingress is the component responsible for routing traffic into your Kubernetes cluster. As deployer you can define an ingress object and an ingress controller will make sure incoming traffic gets routed to the backend service as defined. Skipper supports this scenario with the Kubernetes dataclient and is used in production since end of 2016.

Skipper as ingress controller does not need to have any file configuration or anything external which configures Skipper. Skipper automatically finds Ingress objects and configures routes automatically, without reloading. The only requirement is to target all traffic you want to serve with Kubernetes to a loadbalancer pool of Skippers. This is a clear advantage over other ingress controllers like nginx, haproxy or envoy.

Read more about Skipper\u2019s Kubernetes dataclient.

"},{"location":"operation/deployment/#demos-talks","title":"Demos / Talks","text":"

In demos you may want to show arbitrary hello world applications. You can easily describe html or json output on the command line with the route-string dataclient.

"},{"location":"operation/deployment/#simple-routes-file","title":"Simple Routes File","text":"

The most static deployment that is known from apache, nginx or haproxy is to write your routes into a file and start your http server. This is what the Eskip file dataclient is about.

"},{"location":"operation/operation/","title":"Operations","text":"

This is the work-in-progress operations guide, showing information that is relevant for production use.

Skipper is proven to scale with number of routes beyond 300,000 routes per instance. Skipper is running with peaks of 65,000 http requests per second using multiple instances.

"},{"location":"operation/operation/#connection-options","title":"Connection Options","text":"

Skipper\u2019s connection options are allowing you to set Go\u2019s http.Server Options on the client side and http.Transport on the backend side.

It is recommended to read this blog post about net http timeouts in order to better understand the impact of these settings.

"},{"location":"operation/operation/#backend","title":"Backend","text":"

Backend is the side skipper opens a client connection to.

Closing idle connections is required for DNS failover, because Go\u2019s http.Transport caches DNS lookups and needs to create new connections for doing so. Skipper will start a goroutine and use the specified time.Duration to call CloseIdleConnections() on that http.Transport.

-close-idle-conns-period string\n    period of closing all idle connections in seconds or as a\n    duration string. Not closing when less than 0 (default \"20\")\n

This will set MaxIdleConnsPerHost on the http.Transport to limit the number of idle connections per backend such that we do not run out of sockets.

-idle-conns-num int\n    maximum idle connections per backend host (default 64)\n

This will set MaxIdleConns on the http.Transport to limit the number for all backends such that we do not run out of sockets.

-disable-http-keepalives bool\n    forces backend to always create a new connection\n

This will set DisableKeepAlives on the http.Transport to disable HTTP keep-alive and to only use the connection for single request.

-max-idle-connection-backend int\n    sets the maximum idle connections for all backend connections\n

This will set TLSHandshakeTimeout on the http.Transport to have timeouts based on TLS connections.

-tls-timeout-backend duration\n    sets the TLS handshake timeout for backend connections (default 1m0s)\n

This will set Timeout on net.Dialer that is the implementation of DialContext, which is the TCP connection pool used in the http.Transport.

-timeout-backend duration\n    sets the TCP client connection timeout for backend connections (default 1m0s)\n

This will set KeepAlive on net.Dialer that is the implementation of DialContext, which is the TCP connection pool used in the http.Transport.

-keepalive-backend duration\n    sets the keepalive for backend connections (default 30s)\n

This will set DualStack (IPv4 and IPv6) on net.Dialer that is the implementation of DialContext, which is the TCP connection pool used in the http.Transport.

-enable-dualstack-backend\n    enables DualStack for backend connections (default true)\n
"},{"location":"operation/operation/#client","title":"Client","text":"

Client is the side skipper gets incoming calls from. Here we can set timeouts in different parts of the http connection.

This will set ReadTimeout in http.Server handling incoming calls from your clients.

-read-timeout-server duration\n    set ReadTimeout for http server connections (default 5m0s)\n

This will set ReadHeaderTimeout in http.Server handling incoming calls from your clients.

-read-header-timeout-server duration\n    set ReadHeaderTimeout for http server connections (default 1m0s)\n

This will set WriteTimeout in http.Server handling incoming calls from your clients.

-write-timeout-server duration\n    set WriteTimeout for http server connections (default 1m0s)\n

This will set IdleTimeout in http.Server handling incoming calls from your clients. If you have another loadbalancer layer in front of your Skipper http routers, for example AWS Application Load Balancers, you should make sure, that Skipper\u2019s idle-timeout-server setting is bigger than the idle timeout from the loadbalancer in front. Wrong combinations of idle timeouts can lead to a few unexpected HTTP 502.

-idle-timeout-server duration\n    set IdleTimeout for http server connections (default 1m0s)\n

This will set MaxHeaderBytes in http.Server to limit the size of the http header from your clients.

-max-header-bytes int\n    set MaxHeaderBytes for http server connections (default 1048576)\n
"},{"location":"operation/operation/#tcp-lifo","title":"TCP LIFO","text":"

Skipper now implements control over the maximum number of incoming TCP client connections.

The purpose of the mechanism is to prevent Skipper requesting more memory than available in case of too many concurrent connections, especially in an autoscaling deployment setup, in those cases when the scaling is not fast enough to follow sudden connection spikes.

This solution relies on a listener implementation combined with a LIFO queue. It allows only a limited number of connections being handled concurrently, defined by the max concurrency configuration. When the max concurrency limit is reached, the new incoming client connections are stored in a queue. When an active (accepted) connection is closed, the most recent pending connection from the queue will be accepted. When the queue is full, the oldest pending connection is closed and dropped, and the new one is inserted into the queue.

The feature can be enabled with the -enable-tcp-queue flag. The maximum concurrency can be set with the -max-tcp-listener-concurrency flag, or, if this flag is not set, then Skipper tries to infer the maximum accepted concurrency from the system by reading the /sys/fs/cgroup/memory/memory.limit_in_bytes file. In this case, it uses the average expected per request memory requirement, which can be set with the -expected-bytes-per-request flag.

Note that the automatically inferred limit may not work as expected in an environment other than cgroups v1.

"},{"location":"operation/operation/#oauth2-tokeninfo","title":"OAuth2 Tokeninfo","text":"

OAuth2 filters integrate with external services and have their own connection handling. Outgoing calls to these services have a default timeout of 2s, which can be changed by the flag -oauth2-tokeninfo-timeout=<OAuthTokeninfoTimeout>.

"},{"location":"operation/operation/#oauth2-tokenintrospection-rfc7662","title":"OAuth2 Tokenintrospection RFC7662","text":"

OAuth2 filters integrate with external services and have their own connection handling. Outgoing calls to these services have a default timeout of 2s, which can be changed by the flag -oauth2-tokenintrospect-timeout=<OAuthTokenintrospectionTimeout>.

"},{"location":"operation/operation/#monitoring","title":"Monitoring","text":"

Monitoring is one of the most important things you need to run in production and skipper has a godoc page for the metrics package, describing options and most keys you will find in the metrics handler endpoint. The default is listening on :9911/metrics. You can modify the listen port with the -support-listener flag. Metrics can be exposed using the Codahale (json) or Prometheus formats and be configured by -metrics-flavour=, which defaults to codahale. To expose both formats you can use a comma separated list: -metrics-flavour=codahale,prometheus.

"},{"location":"operation/operation/#prometheus","title":"Prometheus","text":"

In case you want to get metrics in Prometheus format exposed, use this option to enable it:

-metrics-flavour=prometheus\n

It will return Prometheus metrics on the common metrics endpoint :9911/metrics.

To monitor skipper we recommend the following queries:

  • P99 backend latency: histogram_quantile(0.99, sum(rate(skipper_serve_host_duration_seconds_bucket{}[1m])) by (le))
  • HTTP 2xx rate: histogram_quantile(0.99, sum(rate(skipper_serve_host_duration_seconds_bucket{code =~ \"2.*\"}[1m])) by (le) )
  • HTTP 4xx rate: histogram_quantile(0.99, sum(rate(skipper_serve_host_duration_seconds_bucket{code =~ \"4.*\"}[1m])) by (le) )
  • HTTP 5xx rate: histogram_quantile(0.99, sum(rate(skipper_serve_host_duration_seconds_bucket{code =~ \"5.*\"}[1m])) by (le) )
  • Max goroutines (depends on label selector): max(go_goroutines{application=\"skipper-ingress\"})
  • Max threads (depends on label selector): max(go_threads{application=\"skipper-ingress\"})
  • max heap memory in use in MB (depends on label selector): max(go_memstats_heap_inuse_bytes{application=\"skipper-ingress\"}) / 1024 / 1000
  • Max number of heap objects (depends on label selector): max(go_memstats_heap_objects{application=\"skipper-ingress\"})
  • Max of P75 Go GC runtime in ms (depends on label selector): max(go_gc_duration_seconds{application=\"skipper-ingress\",quantile=\"0.75\"}) * 1000 * 1000
  • P99 request filter duration (depends on label selector): histogram_quantile(0.99, sum(rate(skipper_filter_request_duration_seconds_bucket{application=\"skipper-ingress\"}[1m])) by (le) )
  • P99 response filter duration (depends on label selector): histogram_quantile(0.99, sum(rate(skipper_filter_response_duration_seconds_bucket{application=\"skipper-ingress\"}[1m])) by (le) )
  • If you use Kubernetes limits or Linux cgroup CFS quotas (depends on label selector): sum(rate(container_cpu_cfs_throttled_periods_total{container_name=\"skipper-ingress\"}[1m]))

You may add static metrics labels like version using Prometheus relabeling feature.

"},{"location":"operation/operation/#connection-metrics","title":"Connection metrics","text":"

This option will enable known loadbalancer connections metrics, like counters for active and new connections. This feature sets a metrics callback on http.Server and uses a counter to collect http.ConnState.

-enable-connection-metrics\n    enables connection metrics for http server connections\n

It will expose them in /metrics, for example json structure looks like this example:

{\n  \"counters\": {\n    \"skipper.lb-conn-active\": {\n      \"count\": 6\n    },\n    \"skipper.lb-conn-closed\": {\n      \"count\": 6\n    },\n    \"skipper.lb-conn-idle\": {\n      \"count\": 6\n    },\n    \"skipper.lb-conn-new\": {\n      \"count\": 6\n    }\n  },\n  /* stripped a lot of metrics here */\n}\n
"},{"location":"operation/operation/#lifo-metrics","title":"LIFO metrics","text":"

When enabled in the routes, LIFO queues can control the maximum concurrency level proxied to the backends and mitigate the impact of traffic spikes. The current level of concurrency and the size of the queue can be monitored with gauges per each route using one of the lifo filters. To enable monitoring for the lifo filters, use the command line option:

-enable-route-lifo-metrics\n

When queried, it will return metrics like:

{\n  \"gauges\": {\n    \"skipper.lifo.routeXYZ.active\": {\n      \"value\": 245\n    },\n    \"skipper.lifo.routeXYZ.queued\": {\n      \"value\": 27\n    }\n  }\n}\n
"},{"location":"operation/operation/#application-metrics","title":"Application metrics","text":"

You can enable application metrics for your proxied applications with the option:

-serve-host-metrics\n    enables reporting total serve time metrics for each host\n-serve-route-metrics\n    enables reporting total serve time metrics for each route\n

This will make sure you will get stats for each \u201cHost\u201d header or the route name as \u201ctimers\u201d. The following is an example for -serve-host-metrics:

\"timers\": {\n  \"skipper.servehost.app1_example_com.GET.200\": {\n    \"15m.rate\": 0.06830666203045982,\n    \"1m.rate\": 2.162612637718806e-06,\n    \"5m.rate\": 0.008312609284452856,\n    \"75%\": 236603815,\n    \"95%\": 236603815,\n    \"99%\": 236603815,\n    \"99.9%\": 236603815,\n    \"count\": 3,\n    \"max\": 236603815,\n    \"mean\": 116515451.66666667,\n    \"mean.rate\": 0.0030589345776699827,\n    \"median\": 91273391,\n    \"min\": 21669149,\n    \"stddev\": 89543653.71950394\n  },\n  \"skipper.servehost.app1_example_com.GET.304\": {\n    \"15m.rate\": 0.3503336738177459,\n    \"1m.rate\": 0.07923086447313292,\n    \"5m.rate\": 0.27019839341602214,\n    \"75%\": 99351895.25,\n    \"95%\": 105381847,\n    \"99%\": 105381847,\n    \"99.9%\": 105381847,\n    \"count\": 4,\n    \"max\": 105381847,\n    \"mean\": 47621612,\n    \"mean.rate\": 0.03087161486272533,\n    \"median\": 41676170.5,\n    \"min\": 1752260,\n    \"stddev\": 46489302.203724876\n  },\n  \"skipper.servehost.app1_example_com.GET.401\": {\n    \"15m.rate\": 0.16838468990057648,\n    \"1m.rate\": 0.01572861413072501,\n    \"5m.rate\": 0.1194724817779537,\n    \"75%\": 91094832,\n    \"95%\": 91094832,\n    \"99%\": 91094832,\n    \"99.9%\": 91094832,\n    \"count\": 2,\n    \"max\": 91094832,\n    \"mean\": 58090623,\n    \"mean.rate\": 0.012304914018033056,\n    \"median\": 58090623,\n    \"min\": 25086414,\n    \"stddev\": 33004209\n  }\n},\n

Note you can reduce the dimension of the metrics by removing the HTTP status code and method from it. Use the -serve-method-metric=false and/or -serve-status-code-metric=false. Both flags are enabled by default. For prometheus metrics flavour, a counter with both the HTTP method and status code can be enabled with -serve-host-counter or -serve-route-counter, even if these flags are disabled.

To change the sampling type of how metrics are handled from uniform to exponential decay, you can use the following option, which is better for not so huge utilized applications (less than 100 requests per second):

-metrics-exp-decay-sample\n    use exponentially decaying sample in metrics\n
"},{"location":"operation/operation/#go-metrics","title":"Go metrics","text":"

Metrics from the go runtime memstats are exposed from skipper to the metrics endpoint, default listener :9911, on path /metrics

"},{"location":"operation/operation/#go-metrics-codahale","title":"Go metrics - Codahale","text":"
\"gauges\": {\n  \"skipper.runtime.MemStats.Alloc\": {\n    \"value\": 3083680\n  },\n  \"skipper.runtime.MemStats.BuckHashSys\": {\n    \"value\": 1452675\n  },\n  \"skipper.runtime.MemStats.DebugGC\": {\n    \"value\": 0\n  },\n  \"skipper.runtime.MemStats.EnableGC\": {\n    \"value\": 1\n  },\n  \"skipper.runtime.MemStats.Frees\": {\n    \"value\": 121\n  },\n  \"skipper.runtime.MemStats.HeapAlloc\": {\n    \"value\": 3083680\n  },\n  \"skipper.runtime.MemStats.HeapIdle\": {\n    \"value\": 778240\n  },\n  \"skipper.runtime.MemStats.HeapInuse\": {\n    \"value\": 4988928\n  },\n  \"skipper.runtime.MemStats.HeapObjects\": {\n    \"value\": 24005\n  },\n  \"skipper.runtime.MemStats.HeapReleased\": {\n    \"value\": 0\n  },\n  \"skipper.runtime.MemStats.HeapSys\": {\n    \"value\": 5767168\n  },\n  \"skipper.runtime.MemStats.LastGC\": {\n    \"value\": 1516098381155094500\n  },\n  \"skipper.runtime.MemStats.Lookups\": {\n    \"value\": 2\n  },\n  \"skipper.runtime.MemStats.MCacheInuse\": {\n    \"value\": 6944\n  },\n  \"skipper.runtime.MemStats.MCacheSys\": {\n    \"value\": 16384\n  },\n  \"skipper.runtime.MemStats.MSpanInuse\": {\n    \"value\": 77368\n  },\n  \"skipper.runtime.MemStats.MSpanSys\": {\n    \"value\": 81920\n  },\n  \"skipper.runtime.MemStats.Mallocs\": {\n    \"value\": 1459\n  },\n  \"skipper.runtime.MemStats.NextGC\": {\n    \"value\": 4194304\n  },\n  \"skipper.runtime.MemStats.NumGC\": {\n    \"value\": 0\n  },\n  \"skipper.runtime.MemStats.PauseTotalNs\": {\n    \"value\": 683352\n  },\n  \"skipper.runtime.MemStats.StackInuse\": {\n    \"value\": 524288\n  },\n  \"skipper.runtime.MemStats.StackSys\": {\n    \"value\": 524288\n  },\n  \"skipper.runtime.MemStats.Sys\": {\n    \"value\": 9246968\n  },\n  \"skipper.runtime.MemStats.TotalAlloc\": {\n    \"value\": 35127624\n  },\n  \"skipper.runtime.NumCgoCall\": {\n    \"value\": 0\n  },\n  \"skipper.runtime.NumGoroutine\": {\n    \"value\": 11\n  },\n  \"skipper.runtime.NumThread\": {\n    
\"value\": 9\n  }\n},\n\"histograms\": {\n  \"skipper.runtime.MemStats.PauseNs\": {\n    \"75%\": 82509.25,\n    \"95%\": 132609,\n    \"99%\": 132609,\n    \"99.9%\": 132609,\n    \"count\": 12,\n    \"max\": 132609,\n    \"mean\": 56946,\n    \"median\": 39302.5,\n    \"min\": 28749,\n    \"stddev\": 31567.015005117817\n  }\n}\n
"},{"location":"operation/operation/#go-metrics-prometheus","title":"Go metrics - Prometheus","text":"
# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.\n# TYPE go_gc_duration_seconds summary\ngo_gc_duration_seconds{quantile=\"0\"} 4.7279e-05\ngo_gc_duration_seconds{quantile=\"0.25\"} 5.9291e-05\ngo_gc_duration_seconds{quantile=\"0.5\"} 7.4e-05\ngo_gc_duration_seconds{quantile=\"0.75\"} 9.55e-05\ngo_gc_duration_seconds{quantile=\"1\"} 0.000199667\ngo_gc_duration_seconds_sum 0.001108339\ngo_gc_duration_seconds_count 13\n# HELP go_goroutines Number of goroutines that currently exist.\n# TYPE go_goroutines gauge\ngo_goroutines 13\n# HELP go_info Information about the Go environment.\n# TYPE go_info gauge\ngo_info{version=\"go1.21.3\"} 1\n# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.\n# TYPE go_memstats_alloc_bytes gauge\ngo_memstats_alloc_bytes 6.4856e+06\n# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.\n# TYPE go_memstats_alloc_bytes_total counter\ngo_memstats_alloc_bytes_total 4.1797384e+07\n# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.\n# TYPE go_memstats_buck_hash_sys_bytes gauge\ngo_memstats_buck_hash_sys_bytes 1.462151e+06\n# HELP go_memstats_frees_total Total number of frees.\n# TYPE go_memstats_frees_total counter\ngo_memstats_frees_total 507460\n# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.\n# TYPE go_memstats_gc_sys_bytes gauge\ngo_memstats_gc_sys_bytes 4.549296e+06\n# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.\n# TYPE go_memstats_heap_alloc_bytes gauge\ngo_memstats_heap_alloc_bytes 6.4856e+06\n# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.\n# TYPE go_memstats_heap_idle_bytes gauge\ngo_memstats_heap_idle_bytes 7.421952e+06\n# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.\n# TYPE go_memstats_heap_inuse_bytes gauge\ngo_memstats_heap_inuse_bytes 8.372224e+06\n# HELP 
go_memstats_heap_objects Number of allocated objects.\n# TYPE go_memstats_heap_objects gauge\ngo_memstats_heap_objects 70159\n# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.\n# TYPE go_memstats_heap_released_bytes gauge\ngo_memstats_heap_released_bytes 6.47168e+06\n# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.\n# TYPE go_memstats_heap_sys_bytes gauge\ngo_memstats_heap_sys_bytes 1.5794176e+07\n# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.\n# TYPE go_memstats_last_gc_time_seconds gauge\ngo_memstats_last_gc_time_seconds 1.6987664839728708e+09\n# HELP go_memstats_lookups_total Total number of pointer lookups.\n# TYPE go_memstats_lookups_total counter\ngo_memstats_lookups_total 0\n# HELP go_memstats_mallocs_total Total number of mallocs.\n# TYPE go_memstats_mallocs_total counter\ngo_memstats_mallocs_total 577619\n# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.\n# TYPE go_memstats_mcache_inuse_bytes gauge\ngo_memstats_mcache_inuse_bytes 19200\n# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.\n# TYPE go_memstats_mcache_sys_bytes gauge\ngo_memstats_mcache_sys_bytes 31200\n# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.\n# TYPE go_memstats_mspan_inuse_bytes gauge\ngo_memstats_mspan_inuse_bytes 302904\n# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.\n# TYPE go_memstats_mspan_sys_bytes gauge\ngo_memstats_mspan_sys_bytes 309624\n# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.\n# TYPE go_memstats_next_gc_bytes gauge\ngo_memstats_next_gc_bytes 8.206808e+06\n# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.\n# TYPE go_memstats_other_sys_bytes gauge\ngo_memstats_other_sys_bytes 2.402169e+06\n# HELP 
go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.\n# TYPE go_memstats_stack_inuse_bytes gauge\ngo_memstats_stack_inuse_bytes 983040\n# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.\n# TYPE go_memstats_stack_sys_bytes gauge\ngo_memstats_stack_sys_bytes 983040\n# HELP go_memstats_sys_bytes Number of bytes obtained from system.\n# TYPE go_memstats_sys_bytes gauge\ngo_memstats_sys_bytes 2.5531656e+07\n# HELP go_threads Number of OS threads created.\n# TYPE go_threads gauge\ngo_threads 22\n# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.\n# TYPE process_cpu_seconds_total counter\nprocess_cpu_seconds_total 0.42\n# HELP process_max_fds Maximum number of open file descriptors.\n# TYPE process_max_fds gauge\nprocess_max_fds 60000\n# HELP process_open_fds Number of open file descriptors.\n# TYPE process_open_fds gauge\nprocess_open_fds 10\n# HELP process_resident_memory_bytes Resident memory size in bytes.\n# TYPE process_resident_memory_bytes gauge\nprocess_resident_memory_bytes 4.2811392e+07\n# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.\n# TYPE process_start_time_seconds gauge\nprocess_start_time_seconds 1.69876646736e+09\n# HELP process_virtual_memory_bytes Virtual memory size in bytes.\n# TYPE process_virtual_memory_bytes gauge\nprocess_virtual_memory_bytes 2.823462912e+09\n# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.\n# TYPE process_virtual_memory_max_bytes gauge\nprocess_virtual_memory_max_bytes 1.8446744073709552e+19\n
"},{"location":"operation/operation/#redis-rate-limiting-metrics","title":"Redis - Rate limiting metrics","text":"

System metrics exposed by the redisclient:

  • skipper.swarm.redis.shards: known Redis shards to the skipper ringclient
  • skipper.swarm.redis.hits: number of times free connection was found in the pool
  • skipper.swarm.redis.misses: number of times free connection was NOT found in the pool
  • skipper.swarm.redis.timeouts: number of times a wait timeout occurred
  • skipper.swarm.redis.staleconns: number of stale connections removed from the pool
  • skipper.swarm.redis.idleconns: number of idle connections in the pool
  • skipper.swarm.redis.totalconns: number of total connections in the pool

Timer metrics for the latencies and errors of the communication with the auxiliary Redis instances are enabled by the default, and exposed among the timers via the following keys:

  • skipper.swarm.redis.query.allow.success: successful allow requests to the rate limiter, ungrouped
  • skipper.swarm.redis.query.allow.failure: failed allow requests to the rate limiter, ungrouped, where the redis communication failed
  • skipper.swarm.redis.query.retryafter.success.: successful allow requests to the rate limiter, grouped by the rate limiter group name when used
  • skipper.swarm.redis.query.retryafter.failure.: failed allow requests to the rate limiter, where the redis communication failed, grouped by the rate limiter group name when used

    See more details about rate limiting at Rate limiting.

    "},{"location":"operation/operation/#open-policy-agent-metrics","title":"Open Policy Agent metrics","text":"

    If Open Policy Agent filters are enabled, the following counters show up in the /metrics endpoint. The bundle-name is the first parameter of the filter so that for example increased error codes can be attributed to a specific source bundle / system.

    • skipper.opaAuthorizeRequest.custom.decision.allow.<bundle-name>
    • skipper.opaAuthorizeRequest.custom.decision.deny.<bundle-name>
    • skipper.opaAuthorizeRequest.custom.decision.err.<bundle-name>
    • skipper.opaServeResponse.custom.decision.allow.<bundle-name>
    • skipper.opaServeResponse.custom.decision.deny.<bundle-name>
    • skipper.opaServeResponse.custom.decision.err.<bundle-name>

    The following timer metrics are exposed per used bundle-name:

    • skipper.opaAuthorizeRequest.custom.eval_time.<bundle-name>
    • skipper.opaServeResponse.custom.eval_time.<bundle-name>
    "},{"location":"operation/operation/#routesrv-metrics","title":"RouteSRV metrics","text":"

    RouteSRV metrics expose the following metrics in Prometheus format:

    % curl http://127.0.0.1:9911/metrics\n# 8< Go metrics >8\n\n# HELP routesrv_backend_combined_duration_seconds Duration in seconds of a proxy backend combined.\n# TYPE routesrv_backend_combined_duration_seconds histogram\nroutesrv_backend_combined_duration_seconds_bucket{le=\"0.005\"} 5\nroutesrv_backend_combined_duration_seconds_bucket{le=\"0.01\"} 5\nroutesrv_backend_combined_duration_seconds_bucket{le=\"0.025\"} 5\nroutesrv_backend_combined_duration_seconds_bucket{le=\"0.05\"} 5\nroutesrv_backend_combined_duration_seconds_bucket{le=\"0.1\"} 5\nroutesrv_backend_combined_duration_seconds_bucket{le=\"0.25\"} 5\nroutesrv_backend_combined_duration_seconds_bucket{le=\"0.5\"} 5\nroutesrv_backend_combined_duration_seconds_bucket{le=\"1\"} 5\nroutesrv_backend_combined_duration_seconds_bucket{le=\"2.5\"} 5\nroutesrv_backend_combined_duration_seconds_bucket{le=\"5\"} 5\nroutesrv_backend_combined_duration_seconds_bucket{le=\"10\"} 5\nroutesrv_backend_combined_duration_seconds_bucket{le=\"+Inf\"} 5\nroutesrv_backend_combined_duration_seconds_sum 0.001349441\nroutesrv_backend_combined_duration_seconds_count 5\n# HELP routesrv_backend_duration_seconds Duration in seconds of a proxy backend.\n# TYPE routesrv_backend_duration_seconds histogram\nroutesrv_backend_duration_seconds_bucket{host=\"\",route=\"routersv\",le=\"0.005\"} 5\nroutesrv_backend_duration_seconds_bucket{host=\"\",route=\"routersv\",le=\"0.01\"} 5\nroutesrv_backend_duration_seconds_bucket{host=\"\",route=\"routersv\",le=\"0.025\"} 5\nroutesrv_backend_duration_seconds_bucket{host=\"\",route=\"routersv\",le=\"0.05\"} 5\nroutesrv_backend_duration_seconds_bucket{host=\"\",route=\"routersv\",le=\"0.1\"} 5\nroutesrv_backend_duration_seconds_bucket{host=\"\",route=\"routersv\",le=\"0.25\"} 5\nroutesrv_backend_duration_seconds_bucket{host=\"\",route=\"routersv\",le=\"0.5\"} 5\nroutesrv_backend_duration_seconds_bucket{host=\"\",route=\"routersv\",le=\"1\"} 
5\nroutesrv_backend_duration_seconds_bucket{host=\"\",route=\"routersv\",le=\"2.5\"} 5\nroutesrv_backend_duration_seconds_bucket{host=\"\",route=\"routersv\",le=\"5\"} 5\nroutesrv_backend_duration_seconds_bucket{host=\"\",route=\"routersv\",le=\"10\"} 5\nroutesrv_backend_duration_seconds_bucket{host=\"\",route=\"routersv\",le=\"+Inf\"} 5\nroutesrv_backend_duration_seconds_sum{host=\"\",route=\"routersv\"} 0.001349441\nroutesrv_backend_duration_seconds_count{host=\"\",route=\"routersv\"} 5\n# HELP routesrv_custom_gauges Gauges number of custom metrics.\n# TYPE routesrv_custom_gauges gauge\nroutesrv_custom_gauges{key=\"polling_started_timestamp\"} 1.69876646881321e+09\nroutesrv_custom_gauges{key=\"redis_endpoints\"} 1\nroutesrv_custom_gauges{key=\"routes.byte\"} 91378\nroutesrv_custom_gauges{key=\"routes.initialized_timestamp\"} 1.6987664689696188e+09\nroutesrv_custom_gauges{key=\"routes.total\"} 258\nroutesrv_custom_gauges{key=\"routes.updated_timestamp\"} 1.698766468969631e+09\n# HELP routesrv_custom_total Total number of custom metrics.\n# TYPE routesrv_custom_total counter\nroutesrv_custom_total{key=\"200\"} 5\n

    Metrics explanation:

    • routesrv_custom_total{key=\"200\"} 5: 5 requests were responded with status code 200 by the current routesrv version v0.18.38.
    • routesrv_custom_gauges{key=\"polling_started_timestamp\"} 1.69876646881321e+09: routesrv started to poll at 1.69876646881321e+09 seconds of UNIX beginning (2023-10-31 16:34:28 1705425/2097152 +0100).
    • routesrv_custom_gauges{key=\"redis_endpoints\"} 1: The routes endpoint /swarm/redis/shards was called 1 times
    • routesrv_custom_gauges{key=\"routes.byte\"} 91378: The number of bytes that are served at /routes is 91378.
    • routesrv_custom_gauges{key=\"routes.initialized_timestamp\"} 1.6987664689696188e+09: routesrv initialized the routes at 1.6987664689696188e+09 seconds of UNIX beginning. (2023-10-31 16:34:28 1016719/1048576 +0100)
    • routesrv_custom_gauges{key=\"routes.total\"} 258: The number of routes that are served at /routes are 258.
    • routesrv_custom_gauges{key=\"routes.updated_timestamp\"} 1.698766468969631e+09: The last update of routes by routesrv was at 1.698766468969631e+09. (2023-10-31 16:34:28 4066927/4194304 +0100)

    If you want to read more about RouteSRV see deploy RouteSRV.

    "},{"location":"operation/operation/#opentracing","title":"OpenTracing","text":"

    Skipper has support for different OpenTracing API vendors, including jaeger, lightstep and instana.

    You can configure tracing implementations with a flag and pass information and tags to the tracer:

    -opentracing=<vendor> component-name=skipper-ingress ... tag=cluster=mycluster ...\n

    The best tested tracer is the lightstep tracer, because we use it in our setup. In case you miss something for your chosen tracer, please open an issue or pull request in our repository.

    Skipper creates up to 5 different spans:

    Some Tag details are added to all spans.

    "},{"location":"operation/operation/#ingress-span","title":"Ingress span","text":"

    The Ingress span is active from getting the request in Skipper\u2019s main http handler, until we served the response to the client of the request.

    Tags:

    • component: skipper
    • hostname: ip-10-149-64-142
    • http.host: hostname.example.org
    • http.method: GET
    • http.path: /
    • http.remote_addr: 10.149.66.207:14574
    • http.url: /
    • span.kind: server

    "},{"location":"operation/operation/#proxy-span","title":"Proxy span","text":"

    The Proxy span starts just before executing the backend call.

    Tags:

    • component: skipper
    • hostname: ip-10-149-65-70
    • http.host: hostname.example.org
    • http.method: GET
    • http.path: /
    • http.remote_addr:
    • http.status_code: 200
    • http.url: http://10.2.0.11:9090/
    • skipper.route_id: kube_default__example_ingress_hostname_example_org____example_backend
    • span.kind: client

    Proxy span has logs to measure connect (dial_context), http roundtrip (http_roundtrip), stream headers from backend to client (stream_Headers), stream body from backend to client (streamBody.byte) and events by the Go runtime.

    In addition to the manual instrumented proxy client logs, we use net/http/httptrace.ClientTrace to show events by the Go runtime. Full logs of the Proxy span:

    • http_roundtrip: \"start\": just before http roundtrip
    • http_roundtrip: \"end\": just after http roundtrip
    • get_conn: \"start\": try to get a connection from the connection pool httptrace.ClientTrace
    • get_conn: \"end\": got a connection from the connection pool httptrace.ClientTrace
    • DNS: \"start\": try to resolve DNS httptrace.ClientTrace
    • DNS: \"end\": got an IP httptrace.ClientTrace
    • TLS: \"start\": start TLS connection httptrace.ClientTrace
    • TLS: \"end\": established TLS connection httptrace.ClientTrace
    • connect: \"start\": start to establish TCP/IP connection httptrace.ClientTrace
    • connect: \"end\": established TCP/IP connection httptrace.ClientTrace
    • wrote_headers: \"done\": wrote HTTP Headers into the socket httptrace.ClientTrace
    • wrote_request: \"done\": wrote full HTTP Request into the socket httptrace.ClientTrace
    • got_first_byte: \"done\": Got first byte of the HTTP response from the backend httptrace.ClientTrace

    "},{"location":"operation/operation/#request-filters-span","title":"Request filters span","text":"

    The request filters span logs show start and end events for each filter applied.

    "},{"location":"operation/operation/#response-filters-span","title":"Response filters span","text":"

    The response filters span logs show start and end events for each filter applied.

    Request and response filters event logging can be disabled by setting the -opentracing-log-filter-lifecycle-events=false flag and span creation can be disabled altogether by the -opentracing-disable-filter-spans flag.

    "},{"location":"operation/operation/#auth-filters-span","title":"Auth filters span","text":"

    Auth filters are special, because they might call an authorization endpoint, which should be also visible in the trace. This span can have the name \u201ctokeninfo\u201d, \u201ctokenintrospection\u201d or \u201cwebhook\u201d depending on the filter used by the matched route.

    Tags: - http.url: https://auth.example.org

    The auth filters have trace log values start and end for DNS, TCP connect, TLS handshake and connection pool:

    "},{"location":"operation/operation/#open-policy-agent-span","title":"Open Policy Agent span","text":"

    When one of the Open Policy Agent filters is used, child spans with the operation name open-policy-agent are added to the Trace.

    The following tags are added to the Span, labels are taken from the OPA configuration YAML file as is and are not interpreted: - opa.decision_id=<decision id that was recorded> - opa.labels.<label1>=<value1>

    The labels can for example be used to link to a specific decision in the control plane if they contain URL fragments for the receiving entity.

    "},{"location":"operation/operation/#redis-rate-limiting-spans","title":"Redis rate limiting spans","text":""},{"location":"operation/operation/#operation-redis_allow_check_card","title":"Operation: redis_allow_check_card","text":"

    Operation executed when the cluster rate limiting relies on the auxiliary Redis instances, and the Allow method checks if the rate exceeds the configured limit.

    "},{"location":"operation/operation/#operation-redis_allow_add_card","title":"Operation: redis_allow_add_card","text":"

    Operation setting the counter of the measured request rate for cluster rate limiting with auxiliary Redis instances.

    "},{"location":"operation/operation/#operation-redis_oldest_score","title":"Operation: redis_oldest_score","text":"

    Operation querying the oldest request event for the rate limiting Retry-After header with cluster rate limiting when used with auxiliary Redis instances.

    "},{"location":"operation/operation/#dataclient","title":"Dataclient","text":"

    Dataclients poll some kind of data source for routes. To change the timeout for calls that poll a dataclient, which could be the Kubernetes API, use the following option:

    -source-poll-timeout int\n    polling timeout of the routing data sources, in milliseconds (default 3000)\n
    "},{"location":"operation/operation/#routing-table-information","title":"Routing table information","text":"

    Skipper allows you to get some runtime insights. You can get the current routing table from skipper with in the eskip file format:

    curl localhost:9911/routes\n*\n-> \"http://localhost:12345/\"\n

    You also can get the number of routes X-Count and the UNIX timestamp of the last route table update X-Timestamp, using a HEAD request:

    curl -I localhost:9911/routes\nHTTP/1.1 200 OK\nContent-Type: text/plain\nX-Count: 1\nX-Timestamp: 1517777628\nDate: Sun, 04 Feb 2018 20:54:31 GMT\n

    The number of routes given is limited (1024 routes by default). In order to control this limits, there are two parameters: limit and offset. The limit defines the number of routes to get and offset where to start the list. Thanks to this, it\u2019s possible to get the results paginated or getting all of them at the same time.

    curl localhost:9911/routes?offset=200&limit=100\n
    "},{"location":"operation/operation/#passive-health-check-experimental","title":"Passive health check (experimental)","text":"

    Skipper has an option to automatically detect and mitigate faulty backend endpoints, this feature is called Passive Health Check(PHC).

    PHC works the following way: the entire uptime is divided into chunks of period, per every period Skipper calculates the total amount of requests and amount of requests failed per every endpoint. While the next period is going on, Skipper takes a look at the previous period and if the amount of requests in the previous period is more than min-requests and failed requests ratio is more than min-drop-probability for the given endpoints then Skipper will send reduced (the more max-drop-probability and failed requests ratio in previous period are, the stronger reduction is) amount of requests compared to amount sent without PHC. If the ratio of unhealthy endpoints is more than max-unhealthy-endpoints-ratio then PHC becomes fail-open. This effectively means if there are too many unhealthy endpoints PHC does not try to mitigate them any more and requests are sent like there is no PHC at all.

    Having this, we expect less requests to fail because a lot of them would be sent to endpoints that seem to be healthy instead.

    To enable this feature, you need to provide -passive-health-check option having aforementioned parameters (period, min-requests, min-drop-probability, max-drop-probability, max-unhealthy-endpoints-ratio) defined. period, min-requests, max-drop-probability are required parameters, it is not possible for PHC to be enabled without them explicitly defined by user. min-drop-probability is implicitly defined as 0.0 if not explicitly set by user. max-unhealthy-endpoints-ratio is defined as 1.0 if not explicitly set by user. Valid examples of -passive-health-check are:

    • -passive-health-check=period=1s,min-requests=10,min-drop-probability=0.05,max-drop-probability=0.9,max-unhealthy-endpoints-ratio=0.3
    • -passive-health-check=period=1s,min-requests=10,max-drop-probability=0.9,max-unhealthy-endpoints-ratio=0.3
    • -passive-health-check=period=1s,min-requests=10,min-drop-probability=0.05,max-drop-probability=0.9
    • -passive-health-check=period=1s,min-requests=10,max-drop-probability=0.9

    If -passive-health-check option is provided, but some required parameters are not defined, Skipper will not start. Skipper will run without this feature, if no -passive-health-check is provided at all.

    The parameters of -passive-health-check option are:

    • period=<duration> - the duration of stats reset period
    • min-requests=<int> - the minimum number of requests per period per backend endpoint required to activate PHC for this endpoint
    • min-drop-probability=[0.0 <= p < max-drop-probability) - the minimum possible probability of unhealthy endpoint being not considered while choosing the endpoint for the given request. The same value is in fact used as minimal failed requests ratio for PHC to be enabled for this endpoint
    • max-drop-probability=(min-drop-probability < p <= 1.0] - the maximum possible probability of unhealthy endpoint being not considered while choosing the endpoint for the given request
    • max-unhealthy-endpoints-ratio=[0.0 <= r <= 1.0] - the maximum ratio of unhealthy endpoints for PHC to try to mitigate ongoing requests
    "},{"location":"operation/operation/#metrics","title":"Metrics","text":"

    A set of metrics will be exposed to track passive health check:

    • passive-health-check.endpoints.dropped: Number of all endpoints dropped before load balancing a request, so after N requests and M endpoints are being dropped this counter would be N*M.
    • passive-health-check.requests.passed: Number of unique requests where PHC was able to avoid sending them to unhealthy endpoints.
    "},{"location":"operation/operation/#memory-consumption","title":"Memory consumption","text":"

    While Skipper is generally not memory bound, some features may require some attention and planning regarding the memory consumption.

    Potentially high memory consumers:

    • Metrics
    • Filters
    • Slow Backends and chatty clients

    Make sure you monitor backend latency, request and error rates. Additionally use Go metrics for the number of goroutines and threads, GC pause times should be less than 1ms in general, route lookup time, request and response filter times and heap memory.

    "},{"location":"operation/operation/#metrics_1","title":"Metrics","text":"

    Memory consumption of metrics is dependent on enabled command line flags. Make sure to monitor Go metrics.

    If you use -metrics-flavour=codahale,prometheus you enable both storage backends.

    If you use the Prometheus histogram buckets -histogram-metric-buckets.

    If you enable route based -route-backend-metrics -route-response-metrics -serve-route-metrics, error codes -route-response-metrics and host -serve-host-metrics based metrics it can count up. Please check the support listener endpoint (default 9911) to understand the usage:

    % curl localhost:9911/metrics\n

    By default, the route and host metrics include the labels for the request HTTP response status code and the HTTP method. You can customize it by setting -serve-method-metric=false and/or -serve-status-code-metric=false. These two flags will enable or disable the method and status code labels from your metrics reducing the number of metrics generated and memory consumption.

    "},{"location":"operation/operation/#filters","title":"Filters","text":"

    Ratelimit filter clusterClientRatelimit implementation using the swim based protocol, consumes roughly 15MB per filter for 100.000 individual clients and 10 maximum hits. Make sure you monitor Go metrics. Ratelimit filter clusterClientRatelimit implementation using the Redis ring based solution, adds 2 additional roundtrips to redis per hit. Make sure you monitor redis closely, because skipper will fallback to allow traffic if redis can not be reached.

    "},{"location":"operation/operation/#slow-backends","title":"Slow Backends","text":"

    Skipper has to keep track of all active connections and http Requests. Slow Backends can pile up in number of connections, that will consume each a little memory per request. If you have high traffic per instance and a backend times out it can start to increase your memory consumption. Make sure you monitor backend latency, request and error rates.

    "},{"location":"operation/operation/#default-filters","title":"Default Filters","text":"

    Default filters will be applied to all routes created or updated.

    "},{"location":"operation/operation/#global-default-filters","title":"Global Default Filters","text":"

    Global default filters can be specified via two different command line flags -default-filters-prepend and -default-filters-append. Filters passed to these command line flags will be applied to all routes. The difference between prepend and append is where in the filter chain these default filters are applied.

    For example a user specified the route: r: * -> setPath(\"/foo\") If you run skipper with -default-filters-prepend=enableAccessLog(4,5) -> lifo(100,100,\"10s\"), the actual route will look like this: r: * -> enableAccessLog(4,5) -> lifo(100,100,\"10s\") -> setPath(\"/foo\"). If you run skipper with -default-filters-append=enableAccessLog(4,5) -> lifo(100,100,\"10s\"), the actual route will look like this: r: * -> setPath(\"/foo\") -> enableAccessLog(4,5) -> lifo(100,100,\"10s\").

    "},{"location":"operation/operation/#kubernetes-default-filters","title":"Kubernetes Default Filters","text":"

    Kubernetes dataclient supports default filters. You can enable this feature by specifying default-filters-dir. The defined directory must contain per-service filter configurations, with file name following the pattern ${service}.${namespace}. The content of the files is the actual filter configurations. These filters are then prepended to the filters already defined in Ingresses.

    The default filters are supposed to be used only if the filters of the same kind are not configured on the Ingress resource. Otherwise, it can and will lead to potentially contradicting filter configurations and race conditions, i.e. you should specify a specific filter either on the Ingress resource or as a default filter.

    "},{"location":"operation/operation/#scheduler","title":"Scheduler","text":"

    HTTP request schedulers change the queuing behavior of in-flight requests. A queue has two generic properties: a limit of requests and a concurrency level. The limit of request can be unlimited (unbounded queue), or limited (bounded queue). The concurrency level is either limited or unlimited.

    The default scheduler is an unbounded first in first out (FIFO) queue, that is provided by Go\u2019s standard library.

    Skipper provides 2 last in first out (LIFO) filters to change the scheduling behavior.

    On failure conditions, Skipper will return HTTP status code:

    • 503 if the queue is full, which is expected on the route with a failing backend
    • 502 if queue access times out, because the queue access was not fast enough
    • 500 on unknown errors, please create an issue
    "},{"location":"operation/operation/#the-problem","title":"The problem","text":"

    Why should you use boundaries to limit concurrency level and limit the queue?

    The short answer is resiliency. If you have one route, that is timing out, the request queue of skipper will pile up and consume much more memory, than before. This can lead to out of memory kill, which will affect all other routes. In this comment you can see the memory usage increased in Go\u2019s standard library bufio package.

    Why LIFO queue instead of FIFO queue?

    In normal cases the queue should not contain many requests. Skipper is able to process many requests concurrently without letting the queue piling up. In overrun situations you might want to process at least some fraction of requests instead of timing out all requests. LIFO would not time out all requests within the queue, if the backend is capable of responding some requests fast enough.

    "},{"location":"operation/operation/#a-solution","title":"A solution","text":"

    Skipper has two filters lifo() and lifoGroup(), that can limit the number of requests for a route. A documented load test shows the behavior with an enabled lifo(100,100,\"10s\") filter for all routes, that was added by default. You can do this, if you pass the following flag to skipper: -default-filters-prepend=lifo(100,100,\"10s\").

    Both LIFO filters will use a last in first out queue to handle most requests fast. If skipper is in an overrun mode, it will serve some requests fast and some will time out. The idea is based on Dropbox bandaid proxy, which is not opensource. Dropbox shared their idea in a public blogpost.

    Skipper\u2019s scheduler implementation makes sure, that one route will not interfere with other routes, if these routes are not in the same scheduler group. LifoGroup has a user chosen scheduler group and lifo() will get a per route unique scheduler group.

    "},{"location":"operation/operation/#uri-standards-interpretation","title":"URI standards interpretation","text":"

    Considering the following request path: /foo%2Fbar, Skipper can handle it in two different ways. The current default way is that when the request is parsed purely relying on the Go stdlib url package, this path becomes /foo/bar. According to RFC 2616 and RFC 3986, this may be considered wrong, and this path should be parsed as /foo%2Fbar. This is possible to achieve centrally, when Skipper is started with the -rfc-patch-path flag. It is also possible to allow the default behavior and only force the alternative interpretation on a per-route basis with the rfcPath() filter. See rfcPath().

    If the second interpretation gets considered the right way, and the other one a bug, then the default value for this flag may become to be on.

    "},{"location":"operation/operation/#debugging-requests","title":"Debugging Requests","text":"

    Skipper provides filters, that can change HTTP requests. You might want to inspect how the request was changed, during the route processing and check the request that would be made to the backend. Luckily with -debug-listener=:9922, Skipper can provide you this information.

    For example you have the following route:

    kube_default__foo__foo_teapot_example_org_____foo: Host(/^foo[.]teapot[.]example[.]org$/) && PathSubtree(\"/\")\n  -> setRequestHeader(\"X-Foo\", \"hello-world\")\n  -> <roundRobin, \"http://10.2.0.225:9090\", \"http://10.2.1.244:9090\">;\n

    If you sent now a request to the debug listener, that will be matched by the route, Skipper will respond with information that show you the matched route, the incoming request, the transformed request and all predicates and filters involved in the route processing:

    % curl -s http://127.0.0.1:9922/ -H\"Host: foo.teapot.example.org\" | jq .\n{\n  \"route_id\": \"kube_default__foo__foo_teapot_example_org_____foo\",\n  \"route\": \"Host(/^foo[.]teapot[.]example[.]org$/) && PathSubtree(\\\"/\\\") -> setRequestHeader(\\\"X-Foo\\\", \\\"hello-world\\\") -> <roundRobin, \\\"http://10.2.0.225:9090\\\", \\\"http://10.2.1.244:9090\\\">\",\n  \"incoming\": {\n    \"method\": \"GET\",\n    \"uri\": \"/\",\n    \"proto\": \"HTTP/1.1\",\n    \"header\": {\n      \"Accept\": [\n        \"*/*\"\n      ],\n      \"User-Agent\": [\n        \"curl/7.49.0\"\n      ]\n    },\n    \"host\": \"foo.teapot.example.org\",\n    \"remote_address\": \"127.0.0.1:32992\"\n  },\n  \"outgoing\": {\n    \"method\": \"GET\",\n    \"uri\": \"\",\n    \"proto\": \"HTTP/1.1\",\n    \"header\": {\n      \"Accept\": [\n        \"*/*\"\n      ],\n      \"User-Agent\": [\n        \"curl/7.49.0\"\n      ],\n      \"X-Foo\": [\n        \"hello-world\"\n      ]\n    },\n    \"host\": \"foo.teapot.example.org\"\n  },\n  \"response_mod\": {\n    \"header\": {\n      \"Server\": [\n        \"Skipper\"\n      ]\n    }\n  },\n  \"filters\": [\n    {\n      \"name\": \"setRequestHeader\",\n      \"args\": [\n        \"X-Foo\",\n        \"hello-world\"\n      ]\n    }\n  ],\n  \"predicates\": [\n    {\n      \"name\": \"PathSubtree\",\n      \"args\": [\n        \"/\"\n      ]\n    }\n  ]\n}\n
    "},{"location":"operation/operation/#profiling","title":"Profiling","text":"

    Go profiling is explained in Go\u2019s diagnostics documentation.

    "},{"location":"operation/operation/#profiling-skipper-or-routesrv","title":"Profiling skipper or RouteSRV","text":"

    To enable profiling in skipper you have to use -enable-profile. This will start a profiling route at /debug/pprof/profile on the support listener, which defaults to :9911.

    "},{"location":"operation/operation/#profiling-example","title":"Profiling example","text":"

    Start skipper with enabled profiling:

    skipper -inline-routes='r1: * -> inlineContent(\"hello\") -> <shunt>' -enable-profile\n

    Use Go tool pprof to download profiling sample to analyze (sample is not from the example):

    % go tool pprof http://127.0.0.1:9911\nFetching profile over HTTP from http://127.0.0.1:9911/debug/pprof/profile\nSaved profile in /$HOME/pprof/pprof.skipper.samples.cpu.004.pb.gz\nFile: skipper\nBuild ID: 272c31a7bd60c9fabb637bdada37a3331a919b01\nType: cpu\nTime: Oct 7, 2020 at 6:17pm (CEST)\nDuration: 30s, Total samples = 0\nNo samples were found with the default sample value type.\nTry \"sample_index\" command to analyze different sample values.\nEntering interactive mode (type \"help\" for commands, \"o\" for options)\n(pprof) top\nShowing nodes accounting for 2140ms, 50.00% of 4280ms total\nDropped 330 nodes (cum <= 21.40ms)\nShowing top 10 nodes out of 303\n      flat  flat%   sum%        cum   cum%\n     560ms 13.08% 13.08%      640ms 14.95%  syscall.Syscall\n     420ms  9.81% 22.90%      430ms 10.05%  runtime.nanotime\n     410ms  9.58% 32.48%      410ms  9.58%  runtime.futex\n     170ms  3.97% 36.45%      450ms 10.51%  runtime.mallocgc\n     170ms  3.97% 40.42%      180ms  4.21%  runtime.walltime\n     160ms  3.74% 44.16%      220ms  5.14%  runtime.scanobject\n      80ms  1.87% 46.03%       80ms  1.87%  runtime.heapBitsSetType\n      70ms  1.64% 47.66%       70ms  1.64%  runtime.epollwait\n      50ms  1.17% 48.83%      120ms  2.80%  compress/flate.(*compressor).deflate\n      50ms  1.17% 50.00%       50ms  1.17%  encoding/json.stateInString\n(pprof) web\n--> opens browser with SVG\n

    "},{"location":"operation/operation/#response-serving","title":"Response serving","text":"

    When serving a response from a backend, Skipper serves first the HTTP response headers. After that Skipper streams the response payload and uses one 8kB buffer to stream the data through this 8kB buffer. It uses Flush() to make sure the 8kB chunk is written to the client. Details can be observed by opentracing in the logs of the Proxy Span.

    "},{"location":"operation/operation/#forwarded-headers","title":"Forwarded headers","text":"

    Skipper can be configured to add X-Forwarded-* headers:

      -forwarded-headers value\n        comma separated list of headers to add to the incoming request before routing\n        X-Forwarded-For sets or appends with comma the remote IP of the request to the X-Forwarded-For header value\n        X-Forwarded-Host sets X-Forwarded-Host value to the request host\n        X-Forwarded-Port=<port> sets X-Forwarded-Port value\n        X-Forwarded-Proto=<http|https> sets X-Forwarded-Proto value\n  -forwarded-headers-exclude-cidrs value\n        disables addition of forwarded headers for the remote host IPs from the comma separated list of CIDRs\n
    "},{"location":"operation/operation/#converting-routes","title":"Converting Routes","text":"

    For migrations you often need to convert X to Y. This is also true in case you want to switch one predicate to another one or one filter to another one. In skipper we have -edit-route and -clone-route that either modifies matching routes or copy matching routes and change the copy.

    Example:

    A route with edit-route

    % skipper -inline-routes='Path(\"/foo\") -> setResponseHeader(\"X-Foo\",\"bar\") -> inlineContent(\"hi\") -> <shunt>' \\\n-edit-route='/inlineContent[(][\"](.*)[\"][)]/inlineContent(\"modified \\\"$1\\\" response\")/'\n[APP]INFO[0000] Expose metrics in codahale format\n[APP]INFO[0000] support listener on :9911\n[APP]INFO[0000] route settings, reset, route: : Path(\"/foo\") -> setResponseHeader(\"X-Foo\", \"bar\") -> inlineContent(\"hi\") -> <shunt>\n[APP]INFO[0000] proxy listener on :9090\n[APP]INFO[0000] TLS settings not found, defaulting to HTTP\n[APP]INFO[0000] route settings received\n[APP]INFO[0000] route settings applied\n

    Modified route:

    curl localhost:9911/routes\nPath(\"/foo\")\n  -> setResponseHeader(\"X-Foo\", \"bar\")\n  -> inlineContent(\"modified \\\"hi\\\" response\")\n  -> <shunt>\n

    Modified response body:

    % curl -v http://localhost:9090/foo\n*   Trying ::1...\n* Connected to localhost (::1) port 9090 (#0)\n> GET /foo HTTP/1.1\n> Host: localhost:9090\n> User-Agent: curl/7.49.0\n> Accept: */*\n>\n< HTTP/1.1 200 OK\n< Content-Length: 22\n< Content-Type: text/plain; charset=utf-8\n< Server: Skipper\n< X-Foo: bar\n< Date: Thu, 14 Oct 2021 08:41:53 GMT\n<\n* Connection #0 to host localhost left intact\nmodified \"hi\" response\n

    With edit-route and -clone-route you can modify Predicates and Filters to convert from SourceFromLast() to ClientIP, for example if you want to migrate AWS cloud load balancer from Application Load Balancer to Network Load Balancer, you can use -clone-route='/SourceFromLast[(](.*)[)]/ClientIP($1)/' to create additional routes for

    r: SourceFromLast(\"9.0.0.0/8\",\"2001:67c:20a0::/48\") -> ...`\n
    to change to
    r: SourceFromLast(\"9.0.0.0/8\",\"2001:67c:20a0::/48\") -> ...`\nclone_r: ClientIP(\"9.0.0.0/8\",\"2001:67c:20a0::/48\") -> ...`\n
    for migration time.

    / symbol is not the only option for the separator for -edit-route and -clone-route; the first symbol you specify in those options will be used as the separator. This could be useful for IP mask changes, for example, you can use -edit-route='#/26#/24#'. In this case

    r: SourceFromLast(\"9.0.0.0/26\",\"2001:67c:20a0::/48\") -> ...`\n
    will be changed to
    r: SourceFromLast(\"9.0.0.0/24\",\"2001:67c:20a0::/48\") -> ...`\n

    "},{"location":"reference/architecture/","title":"Architecture","text":"

    Skipper is written as a library and is also a multi binary project with 2 binaries, named skipper and eskip. Skipper is the HTTP proxy and eskip is a CLI application to verify, print, update or delete Skipper routes.

    Skipper\u2019s internal architecture is split into different packages. The skipper package has connections to multiple dataclient, that pull information from different sources, for example static routes from an eskip file or dynamic routes from Kubernetes ingress objects.

    The proxy package gets the routes populated by skipper and has always a current routing table which will be replaced on change.

    A route is one entry in the routing table. A route consists of one or more predicate, that are used to find a route for a given HTTP request. A route can also have one or more filter, that can modify the content of the request or response. A route can point to a backend, it can be a <shunt>, meaning that skipper serves the requests for the route, a <loopback>, meaning that the requests will be matched against the routing table again after filters have modified them, or a <dynamic>, meaning that the target url can be set dynamically by a filter (e.g. setDynamicBackendUrl).

    Opentracing API is supported via skipper-plugins. For example Jaeger is supported.

    Skipper has a rich set of metrics that are exposed as json, but can be exported in Prometheus format.

    "},{"location":"reference/architecture/#route-processing","title":"Route processing","text":"

    Package skipper has a Go http.Server and does the ListenAndServe call with the loggingHandler wrapped proxy. The loggingHandler is basically a middleware for the proxy providing access logs and both implement the plain Go http.Handler interface.

    For each incoming http.Request the proxy will create a request context and enhance it with an Opentracing API Span. It will check proxy global ratelimits first and after that lookup the route in the routing table. After that skipper will apply all request filters, that can modify the http.Request. It will then check the route local ratelimits, the circuitbreakers and do the backend call. If the backend call got a TCP or TLS connection error in a loadbalanced route, skipper will do a retry to another backend of that loadbalanced group automatically. Just before the response to the caller, skipper will process the response filters, that can change the http.Response.

    In two special cases, skipper doesn\u2019t forward the request to the backend. When the route is shunted (<shunt>), skipper serves the request alone, by using only the filters. When the route is a <loopback>, the request is passed to the routing table for finding another route, based on the changes that the filters made to the request.

    "},{"location":"reference/architecture/#routing-mechanism","title":"Routing mechanism","text":"

    The routing executes the following steps in the typical case:

    1. Select the best fitting route by matching the request against the predicates. When no route found, respond with 404 (unless the default status code is configured to a different value).

    2. Execute the filters defined in the route in normal order on the request. The filters may or may not alter the request.

    3. Forward the request to the backend defined by the route and receive a response.

    4. Execute the filters defined in the route in reverse order on the response. The filters may or may not alter the response.

    5. Respond to the incoming request with the resulting response.

    "},{"location":"reference/architecture/#route-matching","title":"Route matching","text":"

    Skipper can handle a relatively large number of routes with acceptable performance, while being able to use any attribute of the incoming HTTP requests to distinguish between them. In order to be able to do so, the path matching predicates (Path() and PathSubtree() but not PathRegexp()) have a special role during route matching, which is a tradeoff by design, and needs to be kept in mind to understand in some cases why a certain route was matched for a request instead of another.

    The route matching logic can be summed up as follows:

    1. Lookup in the path tree based on the Path() and the PathSubtree() predicates, using the path component of the incoming request\u2019s URI. Then the remaining predicates of the found route(s) are evaluated.

      • the path lookup is a radix tree with O(log(n)) time complexity

      • in case of intersecting paths, the more specific path is matched in the tree

      • PathRegexp() is not used in the tree, but it is evaluated only after Path() or PathSubtree(), just like e.g. Method() or Host().

    2. If step #1 matches multiple routes, which means there are multiple routes in the same position of the path tree, and all other predicates match the request, too, then the route with the highest weight is matched.

      • this is an O(n) lookup, but only on the same leaf

      • the root of the tree is considered a single leaf, so if not using the Path() or PathSubtree() predicates, the entire lookup will become O(n) over all the routes.

    3. If #2 results in multiple matching routes, then one route will be selected. It is unspecified which one.

    "},{"location":"reference/backends/","title":"Backends","text":"

    A backend is the last part of a route and will define the backend to call for a given request that matches the route.

    Generic route example:

    routeID: Predicate1 && Predicate2 -> filter1 -> filter2 -> <backend>;\n
    "},{"location":"reference/backends/#network-backend","title":"Network backend","text":"

    A network backend is an arbitrary HTTP or HTTPS URL, that will be called by the proxy.

    Route example with a network backend \"https://www.zalando.de/\":

    r0: Method(\"GET\")\n    -> setRequestHeader(\"X-Passed-Skipper\", \"true\")\n    -> \"https://www.zalando.de/\";\n

    Proxy example with request in access log

    ./bin/skipper -inline-routes 'r0: Method(\"GET\") -> setRequestHeader(\"X-Passed-Skipper\", \"true\") -> \"https://www.zalando.de/\";'\n[APP]INFO[0000] Expose metrics in codahale format\n[APP]INFO[0000] support listener on :9911\n[APP]INFO[0000] proxy listener on :9090\n[APP]INFO[0000] TLS settings not found, defaulting to HTTP\n[APP]INFO[0000] route settings, reset, route: r0: Method(\"GET\") -> setRequestHeader(\"X-Passed-Skipper\", \"true\") -> \"https://www.zalando.de/\"\n[APP]INFO[0000] route settings received\n[APP]INFO[0000] route settings applied\n\n::1 - - [05/Feb/2019:14:31:05 +0100] \"GET / HTTP/1.1\" 200 164408 \"-\" \"curl/7.49.0\" 457 localhost:9090 - -\n

    Client example with request and response headers:

    $ curl -v localhost:9090 >/dev/null\n* Rebuilt URL to: localhost:9090/\n*   Trying ::1...\n* Connected to localhost (::1) port 9090 (#0)\n> GET / HTTP/1.1\n> Host: localhost:9090\n> User-Agent: curl/7.49.0\n> Accept: */*\n>\n< HTTP/1.1 200 OK\n< Cache-Control: no-cache, no-store, must-revalidate\n< Content-Type: text/html\n< Date: Tue, 05 Feb 2019 13:31:38 GMT\n< Link: <https://mosaic01.ztat.net/base-assets/require-2.1.22.min.js>; rel=\"preload\"; as=\"script\"; nopush; crossorigin\n< Pragma: no-cache\n< Server: Skipper\n< Set-Cookie: ...; Path=/; Domain=zalando.de; Expires=Sun, 04 Aug 2019 13:31:38 GMT; Max-Age=15552000; HttpOnly; Secure\n< Vary: Accept-Encoding\n< Transfer-Encoding: chunked\n<\n{ [3205 bytes data]\n

    "},{"location":"reference/backends/#shunt-backend","title":"Shunt backend","text":"

    A shunt backend, <shunt>, will not call a backend, but reply directly from the proxy itself. This can be used as a shortcut, for example to have a default that replies with 404, or to use skipper as a backend serving static content in demos.

    Route example proxying to \"https://www.zalando.de/\" in case the Host header is set to \"zalando\"; all other requests will be served an HTTP status code 404 with the body \"no matching route\":

    r0: Host(\"zalando\")\n    -> \"https://www.zalando.de/\";\nrest: *\n      -> status(404)\n      -> inlineContent(\"no matching route\")\n      -> <shunt>;\n

    Proxy configured as defined above with access log showing 404:

    $ ./bin/skipper -inline-routes 'r0: Host(\"zalando\") -> \"https://www.zalando.de/\"; rest: * -> status(404) -> inlineContent(\"no matching route\")  -> \"http://localhost:9999/\";'\n[APP]INFO[0000] Expose metrics in codahale format\n[APP]INFO[0000] support listener on :9911\n[APP]INFO[0000] proxy listener on :9090\n[APP]INFO[0000] TLS settings not found, defaulting to HTTP\n[APP]INFO[0000] route settings, reset, route: r0: Host(/zalando/) -> \"https://www.zalando.de/\"\n[APP]INFO[0000] route settings, reset, route: rest: * -> status(404) -> inlineContent(\"no matching route\") -> \"http://localhost:9999/\"\n[APP]INFO[0000] route settings received\n[APP]INFO[0000] route settings applied\n::1 - - [05/Feb/2019:14:39:26 +0100] \"GET / HTTP/1.1\" 404 17 \"-\" \"curl/7.49.0\" 0 localhost:9090 - -\n

    Client example with request and response headers:

    $ curl -sv localhost:9090\n* Rebuilt URL to: localhost:9090/\n*   Trying ::1...\n* Connected to localhost (::1) port 9090 (#0)\n> GET / HTTP/1.1\n> Host: localhost:9090\n> User-Agent: curl/7.49.0\n> Accept: */*\n>\n< HTTP/1.1 404 Not Found\n< Content-Length: 17\n< Content-Type: text/plain; charset=utf-8\n< Server: Skipper\n< Date: Tue, 05 Feb 2019 13:37:27 GMT\n<\n* Connection #0 to host localhost left intact\nno matching route\n

    "},{"location":"reference/backends/#loopback-backend","title":"Loopback backend","text":"

    The loopback backend, <loopback>, will look up the routing table again for a better matching route after processing the current route. This way you can add some headers or change the request path for some specific matching requests.

    Example:

    • Route r0 is a route with loopback backend that will be matched for requests with paths that start with /api. The route will modify the http request removing /api in the path of the incoming request. In the second step of the routing the modified request will be matched by route r1.
    • Route r1 is a default route with a network backend to call \"https://www.zalando.de/\"
    r0: PathSubtree(\"/api\")\n    -> modPath(\"^/api\", \"\")\n    -> <loopback>;\nr1: * -> \"https://www.zalando.de/\";\n

    Proxy configured as defined above with access logs showing 404 for the first call and 200 for the second:

    $ ./bin/skipper -inline-routes 'r0: PathSubtree(\"/api\") -> setRequestHeader(\"X-Passed-Skipper\", \"true\") -> modPath(/^\\/api/, \"\") -> <loopback>;\nr1: * -> \"https://www.zalando.de/\";'\n[APP]INFO[0000] Expose metrics in codahale format\n[APP]INFO[0000] route settings, reset, route: r0: PathSubtree(\"/api\") -> setRequestHeader(\"X-Passed-Skipper\", \"true\") -> modPath(\"^/api\", \"\") -> <loopback>\n[APP]INFO[0000] route settings, reset, route: r1: * -> \"https://www.zalando.de/\"\n[APP]INFO[0000] route settings received\n[APP]INFO[0000] route settings applied\n[APP]INFO[0000] support listener on :9911\n[APP]INFO[0000] proxy listener on :9090\n[APP]INFO[0000] TLS settings not found, defaulting to HTTP\n::1 - - [05/Feb/2019:14:54:14 +0100] \"GET /api/foo HTTP/1.1\" 404 98348 \"-\" \"curl/7.49.0\" 562 localhost:9090 - -\n::1 - - [05/Feb/2019:14:54:28 +0100] \"GET /api HTTP/1.1\" 200 164408 \"-\" \"curl/7.49.0\" 120 localhost:9090 - -\n

    Client example with request and response headers:

    $ curl -sv localhost:9090/api/foo >/dev/null\n*   Trying ::1...\n* Connected to localhost (::1) port 9090 (#0)\n> GET /api/foo HTTP/1.1\n> Host: localhost:9090\n> User-Agent: curl/7.49.0\n> Accept: */*\n>\n< HTTP/1.1 404 Not Found\n< Content-Language: de-DE\n< Content-Type: text/html;charset=UTF-8\n< Date: Tue, 05 Feb 2019 14:00:33 GMT\n< Transfer-Encoding: chunked\n<\n{ [2669 bytes data]\n* Connection #0 to host localhost left intact\n\n\n$ curl -sv localhost:9090/api >/dev/null\n*   Trying ::1...\n* Connected to localhost (::1) port 9090 (#0)\n> GET /api HTTP/1.1\n> Host: localhost:9090\n> User-Agent: curl/7.49.0\n> Accept: */*\n>\n< HTTP/1.1 200 OK\n< Cache-Control: no-cache, no-store, must-revalidate\n< Content-Type: text/html\n< Date: Tue, 05 Feb 2019 14:00:44 GMT\n< Link: <https://mosaic01.ztat.net/base-assets/require-2.1.22.min.js>; rel=\"preload\"; as=\"script\"; nopush; crossorigin\n< Transfer-Encoding: chunked\n<\n{ [3491 bytes data]\n

    If the request processing reaches the maximum number of loopbacks (by default max=9), the routing will result in an error.

    "},{"location":"reference/backends/#dynamic-backend","title":"Dynamic backend","text":"

    The dynamic backend, <dynamic>, will get the backend to call from data provided by filters. This allows users of skipper as a library to do proxy calls to a certain target from their own implementation, dynamically looked up by their filters.

    Example shows how to set a target by a provided filter, which would be similar to a network backend:

    r0: * -> setDynamicBackendUrl(\"https://www.zalando.de\") -> <dynamic>;\n

    Proxy configured as defined above with access logs showing 200 for the call:

    $ ./bin/skipper -inline-routes 'r0: * -> setDynamicBackendUrl(\"https://www.zalando.de\") -> <dynamic>;'\n[APP]INFO[0000] Expose metrics in codahale format\n[APP]INFO[0000] support listener on :9911\n[APP]INFO[0000] proxy listener on :9090\n[APP]INFO[0000] TLS settings not found, defaulting to HTTP\n[APP]INFO[0000] route settings, reset, route: r0: * -> setDynamicBackendUrl(\"https://www.zalando.de\") -> <dynamic>\n[APP]INFO[0000] route settings received\n[APP]INFO[0000] route settings applied\n::1 - - [05/Feb/2019:15:09:34 +0100] \"GET / HTTP/1.1\" 200 164408 \"-\" \"curl/7.49.0\" 132 localhost:9090 - -\n

    Client example with request and response headers:

    $ curl -sv localhost:9090/ >/dev/null\n*   Trying ::1...\n* Connected to localhost (::1) port 9090 (#0)\n> GET / HTTP/1.1\n> Host: localhost:9090\n> User-Agent: curl/7.49.0\n> Accept: */*\n>\n< HTTP/1.1 200 OK\n< Cache-Control: no-cache, no-store, must-revalidate\n< Content-Type: text/html\n< Date: Tue, 05 Feb 2019 14:09:34 GMT\n< Link: <https://mosaic01.ztat.net/base-assets/require-2.1.22.min.js>; rel=\"preload\"; as=\"script\"; nopush; crossorigin\n< Pragma: no-cache\n< Server: Skipper\n< Transfer-Encoding: chunked\n<\n{ [3491 bytes data]\n* Connection #0 to host localhost left intact\n

    When no filters modifying the target are set (e.g. r0: * -> <dynamic>;), the target host defaults to either the Host header or the host name given in the URL, and the target scheme defaults to either https when TLS is configured or http when TLS is not configured.

    "},{"location":"reference/backends/#load-balancer-backend","title":"Load Balancer backend","text":"

    The loadbalancer backend, <$algorithm, \"backend1\", \"backend2\">, will balance the load across all given backends using the algorithm set in $algorithm. If $algorithm is not specified it will use the default algorithm set by Skipper at start.

    Current implemented algorithms:

    • roundRobin: backend is chosen by the round robin algorithm, starting with a randomly selected backend to spread across all backends from the beginning
    • random: backend is chosen at random
    • consistentHash: backend is chosen by consistent hashing algorithm based on the request key. The request key is derived from X-Forwarded-For header or request remote IP address as the fallback. Use consistentHashKey filter to set the request key. Use consistentHashBalanceFactor to prevent popular keys from overloading a single backend endpoint.
    • powerOfRandomNChoices: backend is chosen by powerOfRandomNChoices algorithm with selecting N random endpoints and picking the one with least outstanding requests from them. (http://www.eecs.harvard.edu/~michaelm/postscripts/handbook2001.pdf)
    • TODO: https://github.com/zalando/skipper/issues/557

    All algorithms except powerOfRandomNChoices support fadeIn filter.

    Route example with 2 backends and the roundRobin algorithm:

    r0: * -> <roundRobin, \"http://127.0.0.1:9998\", \"http://127.0.0.1:9997\">;\n

    Route example with 2 backends and the random algorithm:

    r0: * -> <random, \"http://127.0.0.1:9998\", \"http://127.0.0.1:9997\">;\n

    Route example with 2 backends and the consistentHash algorithm:

    r0: * -> <consistentHash, \"http://127.0.0.1:9998\", \"http://127.0.0.1:9997\">;\n

    Route example with 2 backends and the powerOfRandomNChoices algorithm:

    r0: * -> <powerOfRandomNChoices, \"http://127.0.0.1:9998\", \"http://127.0.0.1:9997\">;\n

    Proxy with roundRobin loadbalancer and two backends:

    $ ./bin/skipper -inline-routes 'r0: *  -> <roundRobin, \"http://127.0.0.1:9998\", \"http://127.0.0.1:9997\">;'\n[APP]INFO[0000] Expose metrics in codahale format\n[APP]INFO[0000] route settings, reset, route: r0: * -> <roundRobin, \"http://127.0.0.1:9998\", \"http://127.0.0.1:9997\">\n[APP]INFO[0000] support listener on :9911\n[APP]INFO[0000] route settings received\n[APP]INFO[0000] proxy listener on :9090\n[APP]INFO[0000] TLS settings not found, defaulting to HTTP\n[APP]INFO[0000] route settings applied\n::1 - - [05/Feb/2019:15:39:06 +0100] \"GET / HTTP/1.1\" 200 1 \"-\" \"curl/7.49.0\" 3 localhost:9090 - -\n::1 - - [05/Feb/2019:15:39:07 +0100] \"GET / HTTP/1.1\" 200 1 \"-\" \"curl/7.49.0\" 0 localhost:9090 - -\n::1 - - [05/Feb/2019:15:39:08 +0100] \"GET / HTTP/1.1\" 200 1 \"-\" \"curl/7.49.0\" 0 localhost:9090 - -\n::1 - - [05/Feb/2019:15:39:09 +0100] \"GET / HTTP/1.1\" 200 1 \"-\" \"curl/7.49.0\" 0 localhost:9090 - -\n

    Backend1 returns \u201cA\u201d in the body:

    $ ./bin/skipper -address=\":9998\" -inline-routes 'r0: * -> inlineContent(\"A\") -> <shunt>;'\n[APP]INFO[0000] Expose metrics in codahale format\n[APP]INFO[0000] support listener on :9911\n[APP]INFO[0000] proxy listener on :9998\n[APP]INFO[0000] TLS settings not found, defaulting to HTTP\n[APP]INFO[0000] route settings, reset, route: r0: * -> inlineContent(\"A\") -> <shunt>\n[APP]INFO[0000] route settings received\n[APP]INFO[0000] route settings applied\n127.0.0.1 - - [05/Feb/2019:15:39:06 +0100] \"GET / HTTP/1.1\" 200 1 \"-\" \"curl/7.49.0\" 0 127.0.0.1:9998 - -\n127.0.0.1 - - [05/Feb/2019:15:39:08 +0100] \"GET / HTTP/1.1\" 200 1 \"-\" \"curl/7.49.0\" 0 127.0.0.1:9998 - -\n

    Backend2 returns \u201cB\u201d in the body:

    $ ./bin/skipper -address=\":9997\" -inline-routes 'r0: * -> inlineContent(\"B\") -> <shunt>;'\n[APP]INFO[0000] Expose metrics in codahale format\n[APP]INFO[0000] support listener on :9911\n[APP]INFO[0000] proxy listener on :9997\n[APP]INFO[0000] route settings, reset, route: r0: * -> inlineContent(\"B\") -> <shunt>\n[APP]INFO[0000] TLS settings not found, defaulting to HTTP\n[APP]INFO[0000] route settings received\n[APP]INFO[0000] route settings applied\n127.0.0.1 - - [05/Feb/2019:15:39:07 +0100] \"GET / HTTP/1.1\" 200 1 \"-\" \"curl/7.49.0\" 0 127.0.0.1:9997 - -\n127.0.0.1 - - [05/Feb/2019:15:39:09 +0100] \"GET / HTTP/1.1\" 200 1 \"-\" \"curl/7.49.0\" 0 127.0.0.1:9997 - -\n

    Client:

    $ curl -s http://localhost:9090/\nA\n$ curl -s http://localhost:9090/\nB\n$ curl -s http://localhost:9090/\nA\n$ curl -s http://localhost:9090/\nB\n

    "},{"location":"reference/backends/#backend-protocols","title":"Backend Protocols","text":"

    Current implemented protocols:

    • http: (default) http protocol
    • fastcgi: (experimental) directly connect Skipper with a FastCGI backend like PHP FPM.

    Route example that uses FastCGI (experimental):

    php: * -> setFastCgiFilename(\"index.php\") -> \"fastcgi://127.0.0.1:9000\";\nphp_lb: * -> setFastCgiFilename(\"index.php\") -> <roundRobin, \"fastcgi://127.0.0.1:9000\", \"fastcgi://127.0.0.1:9001\">;\n

    "},{"location":"reference/development/","title":"Development","text":""},{"location":"reference/development/#how-to-develop-a-filter","title":"How to develop a Filter","text":"

    A filter is part of a route and can change arbitrary http data in the http.Request and http.Response path of a proxy.

    The filter example shows a non-trivial diff of a filter implementation, that implements an authnz webhook. It shows global settings passed via flags, user documentation, developer documentation for library users, the filter implementation and some test cases. Tests should test the actual filter implementation in a proxy setup.

    "},{"location":"reference/development/#how-to-pass-options-to-your-filter","title":"How to pass options to your filter","text":"

    Set a default and a Usage string as const. Add a var to hold the value and put the flag to the category, that makes the most sense.

    If a filter, predicate or dataclient needs Options passed from flags, then you should register the filter in skipper.go, the main library entrypoint. In case you do not need options from flags, use MakeRegistry() in ./filters/builtin/builtin.go to register your filter.

    diff --git a/cmd/skipper/main.go b/cmd/skipper/main.go\nindex 28f18f9..4530b85 100644\n--- a/cmd/skipper/main.go\n+++ b/cmd/skipper/main.go\n@@ -59,9 +59,10 @@ const (\n    defaultOAuthTokeninfoTimeout          = 2 * time.Second\n    defaultOAuthTokenintrospectionTimeout = 2 * time.Second\n+   defaultWebhookTimeout                 = 2 * time.Second\n\n    // generic:\n    addressUsage                         = \"network address that skipper should listen on\"\n@@ -141,6 +142,8 @@ const (\n    oauth2TokeninfoURLUsage              = \"sets the default tokeninfo URL to query information about an incoming OAuth2 token in oauth2Tokeninfo filters\"\n    oauth2TokeninfoTimeoutUsage          = \"sets the default tokeninfo request timeout duration to 2000ms\"\n    oauth2TokenintrospectionTimeoutUsage = \"sets the default tokenintrospection request timeout duration to 2000ms\"\n+   webhookTimeoutUsage                  = \"sets the webhook request timeout duration, defaults to 2s\"\n+\n    // connections, timeouts:\n    idleConnsPerHostUsage           = \"maximum idle connections per backend host\"\n    closeIdleConnsPeriodUsage       = \"period of closing all idle connections in seconds or as a duration string. 
Not closing when less than 0\"\n@@ -243,13 +246,14 @@ var (\n    oauth2TokeninfoURL              string\n    oauth2TokeninfoTimeout          time.Duration\n    oauth2TokenintrospectionTimeout time.Duration\n+   webhookTimeout                  time.Duration\n\n    // connections, timeouts:\n    idleConnsPerHost           int\n@@ -351,13 +355,14 @@ func init() {\n    flag.DurationVar(&oauth2TokeninfoTimeout, \"oauth2-tokeninfo-timeout\", defaultOAuthTokeninfoTimeout, oauth2TokeninfoTimeoutUsage)\n    flag.DurationVar(&oauth2TokenintrospectionTimeout, \"oauth2-tokenintrospect-timeout\", defaultOAuthTokenintrospectionTimeout, oauth2TokenintrospectionTimeoutUsage)\n+   flag.DurationVar(&webhookTimeout, \"webhook-timeout\", defaultWebhookTimeout, webhookTimeoutUsage)\n\n    // connections, timeouts:\n    flag.IntVar(&idleConnsPerHost, \"idle-conns-num\", proxy.DefaultIdleConnsPerHost, idleConnsPerHostUsage)\n@@ -536,13 +541,14 @@ func main() {\n        OAuthTokeninfoURL:              oauth2TokeninfoURL,\n        OAuthTokeninfoTimeout:          oauth2TokeninfoTimeout,\n        OAuthTokenintrospectionTimeout: oauth2TokenintrospectionTimeout,\n+       WebhookTimeout:                 webhookTimeout,\n\n        // connections, timeouts:\n        IdleConnectionsPerHost:     idleConnsPerHost,\n\ndiff --git a/skipper.go b/skipper.go\nindex 10d5769..da46fe0 100644\n--- a/skipper.go\n+++ b/skipper.go\n@@ -443,6 +443,9 @@ type Options struct {\n    // OAuthTokenintrospectionTimeout sets timeout duration while calling oauth tokenintrospection service\n    OAuthTokenintrospectionTimeout time.Duration\n\n+   // WebhookTimeout sets timeout duration while calling a custom webhook auth service\n+   WebhookTimeout time.Duration\n+\n    // MaxAuditBody sets the maximum read size of the body read by the audit log filter\n    MaxAuditBody int\n }\n@@ -677,7 +680,8 @@ func Run(o Options) error {\n        auth.NewOAuthTokenintrospectionAnyClaims(o.OAuthTokenintrospectionTimeout),\n        
auth.NewOAuthTokenintrospectionAllClaims(o.OAuthTokenintrospectionTimeout),\n        auth.NewOAuthTokenintrospectionAnyKV(o.OAuthTokenintrospectionTimeout),\n-       auth.NewOAuthTokenintrospectionAllKV(o.OAuthTokenintrospectionTimeout))\n+       auth.NewOAuthTokenintrospectionAllKV(o.OAuthTokenintrospectionTimeout),\n+       auth.NewWebhook(o.WebhookTimeout))\n\n    // create a filter registry with the available filter specs registered,\n    // and register the custom filters\n
    "},{"location":"reference/development/#user-documentation","title":"User documentation","text":"

    Documentation for users should be done in docs/.

    diff --git a/docs/filters.md b/docs/filters.md\nindex d3bb872..a877062 100644\n--- a/docs/filters.md\n+++ b/docs/filters.md\n@@ -382,6 +382,24 @@ basicAuth(\"/path/to/htpasswd\")\n basicAuth(\"/path/to/htpasswd\", \"My Website\")\n ```\n\n+## webhook\n+\n+The `webhook` filter makes it possible to have your own authentication and\n+authorization endpoint as a filter.\n+\n+Headers from the incoming request will be copied into the request that\n+is being done to the webhook endpoint. Responses from the webhook with\n+status code less than 300 will be authorized, rest unauthorized.\n+\n+Examples:\n+\n+```\n+webhook(\"https://custom-webhook.example.org/auth\")\n+```\n+\n+The webhook timeout has a default of 2 seconds and can be globally\n+changed, if skipper is started with `-webhook-timeout=2s` flag.\n+\n ## oauthTokeninfoAnyScope\n\n If skipper is started with `-oauth2-tokeninfo-url` flag, you can use\n
    "},{"location":"reference/development/#add-godoc","title":"Add godoc","text":"

    Godoc is meant for developers using skipper as a library; use doc.go of the package to document generic functionality, usage and library usage.

    diff --git a/filters/auth/doc.go b/filters/auth/doc.go\nindex 696d3fd..1d6e3a8 100644\n--- a/filters/auth/doc.go\n+++ b/filters/auth/doc.go\n@@ -318,5 +318,12 @@ filter after the auth filter.\n     a: Path(\"/only-allowed-audit-log\") -> oauthTokeninfoAnyScope(\"bar-w\") -> auditLog() -> \"https://internal.example.org/\";\n     b: Path(\"/all-access-requests-audit-log\") -> auditLog() -> oauthTokeninfoAnyScope(\"foo-r\") -> \"https://internal.example.org/\";\n\n+Webhook - webhook() filter\n+\n+The filter webhook allows you to have a custom authentication and\n+authorization endpoint for a route.\n+\n+    a: Path(\"/only-allowed-by-webhook\") -> webhook(\"https://custom-webhook.example.org/auth\") -> \"https://protected-backend.example.org/\";\n+\n */\n package auth\n
    "},{"location":"reference/development/#filter-implementation","title":"Filter implementation","text":"

    A filter can modify the incoming http.Request before calling the backend and the outgoing http.Response from the backend to the client.

    A filter consists of at least two types: a spec and a filter. Spec consists of everything that is needed and known before a user will instantiate a filter.

    A spec will be created in the bootstrap procedure of a skipper process. A spec has to satisfy the Spec interface Name() string and CreateFilter([]interface{}) (filters.Filter, error).

    The actual filter implementation has to satisfy the Filter interface Request(filters.FilterContext) and Response(filters.FilterContext).

    diff --git a/filters/auth/webhook.go b/filters/auth/webhook.go\nnew file mode 100644\nindex 0000000..f0632a6\n--- /dev/null\n+++ b/filters/auth/webhook.go\n@@ -0,0 +1,84 @@\n+package auth\n+\n+import (\n+   \"net/http\"\n+   \"time\"\n+\n+   \"github.com/zalando/skipper/filters\"\n+)\n+\n+const (\n+   WebhookName = \"webhook\"\n+)\n+\n+type (\n+   webhookSpec struct {\n+       Timeout time.Duration\n+   }\n+   webhookFilter struct {\n+       authClient *authClient\n+   }\n+)\n+\n+// NewWebhook creates a new auth filter specification\n+// to validate authorization for requests.\n+func NewWebhook(d time.Duration) filters.Spec {\n+   return &webhookSpec{Timeout: d}\n+}\n+\n+func (*webhookSpec) Name() string {\n+   return WebhookName\n+}\n+\n+// CreateFilter creates an auth filter. The first argument is an URL\n+// string.\n+//\n+//     s.CreateFilter(\"https://my-auth-service.example.org/auth\")\n+//\n+func (ws *webhookSpec) CreateFilter(args []interface{}) (filters.Filter, error) {\n+   if l := len(args); l == 0 || l > 2 {\n+       return nil, filters.ErrInvalidFilterParameters\n+   }\n+\n+   s, ok := args[0].(string)\n+   if !ok {\n+       return nil, filters.ErrInvalidFilterParameters\n+   }\n+\n+   ac, err := newAuthClient(s, ws.Timeout)\n+   if err != nil {\n+       return nil, filters.ErrInvalidFilterParameters\n+   }\n+\n+   return &webhookFilter{authClient: ac}, nil\n+}\n+\n+func copyHeader(to, from http.Header) {\n+   for k, v := range from {\n+       to[http.CanonicalHeaderKey(k)] = v\n+   }\n+}\n+\n+func (f *webhookFilter) Request(ctx filters.FilterContext) {\n+   statusCode, err := f.authClient.getWebhook(ctx.Request())\n+   if err != nil {\n+       unauthorized(ctx, WebhookName, authServiceAccess, f.authClient.url.Hostname())\n+   }\n+   // redirects, auth errors, webhook errors\n+   if statusCode >= 300 {\n+       unauthorized(ctx, WebhookName, invalidAccess, f.authClient.url.Hostname())\n+   }\n+   authorized(ctx, WebhookName)\n+}\n+\n+func 
(*webhookFilter) Response(filters.FilterContext) {}\n
    "},{"location":"reference/development/#writing-tests","title":"Writing tests","text":"

    Skipper uses normal table driven Go tests without frameworks.

    This example filter test creates a backend, an auth service to be called by our filter, and a filter configured by our table driven test.

    In general we use real backends with dynamic port allocations. We call these and inspect the http.Response to check if we get the expected results for invalid and valid data.

    Skipper has some helpers to create the test proxy in the proxytest package. Backends can be created with httptest.NewServer as in the example below.

    diff --git a/filters/auth/webhook_test.go b/filters/auth/webhook_test.go\nnew file mode 100644\nindex 0000000..d43c4ea\n--- /dev/null\n+++ b/filters/auth/webhook_test.go\n@@ -0,0 +1,128 @@\n+package auth\n+\n+import (\n+   \"fmt\"\n+   \"io\"\n+   \"net/http\"\n+   \"net/http/httptest\"\n+   \"net/url\"\n+   \"testing\"\n+   \"time\"\n+\n+   \"github.com/zalando/skipper/eskip\"\n+   \"github.com/zalando/skipper/filters\"\n+   \"github.com/zalando/skipper/proxy/proxytest\"\n+)\n+\n+func TestWebhook(t *testing.T) {\n+   for _, ti := range []struct {\n+       msg        string\n+       token      string\n+       expected   int\n+       authorized bool\n+       timeout    bool\n+   }{{\n+       msg:        \"invalid-token-should-be-unauthorized\",\n+       token:      \"invalid-token\",\n+       expected:   http.StatusUnauthorized,\n+       authorized: false,\n+   }, {\n+       msg:        \"valid-token-should-be-authorized\",\n+       token:      testToken,\n+       expected:   http.StatusOK,\n+       authorized: true,\n+   }, {\n+       msg:        \"webhook-timeout-should-be-unauthorized\",\n+       token:      testToken,\n+       expected:   http.StatusUnauthorized,\n+       authorized: false,\n+       timeout:    true,\n+   }} {\n+       t.Run(ti.msg, func(t *testing.T) {\n+           backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {\n+               w.WriteHeader(http.StatusOK)\n+               io.WriteString(w, \"Hello from backend\")\n+               return\n+           }))\n+           defer backend.Close()\n+\n+           authServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n+               if ti.timeout {\n+                   time.Sleep(time.Second + time.Millisecond)\n+               }\n+\n+               if r.Method != \"GET\" {\n+                   w.WriteHeader(489)\n+                   io.WriteString(w, \"FAIL - not a GET request\")\n+                   return\n+ 
              }\n+\n+               tok := r.Header.Get(authHeaderName)\n+               tok = tok[len(authHeaderPrefix):len(tok)]\n+               switch tok {\n+               case testToken:\n+                   w.WriteHeader(200)\n+                   fmt.Fprintln(w, \"OK - Got token: \"+tok)\n+                   return\n+               }\n+               w.WriteHeader(402)                            //http.StatusUnauthorized)\n+               fmt.Fprintln(w, \"Unauthorized - Got token: \") //+tok)\n+           }))\n+           defer authServer.Close()\n+\n+           spec := NewWebhook(time.Second)\n+\n+           args := []interface{}{\n+               \"http://\" + authServer.Listener.Addr().String(),\n+           }\n+           f, err := spec.CreateFilter(args)\n+           if err != nil {\n+               t.Errorf(\"error in creating filter for %s: %v\", ti.msg, err)\n+               return\n+           }\n+\n+           f2 := f.(*webhookFilter)\n+           defer f2.Close()\n+\n+           fr := make(filters.Registry)\n+           fr.Register(spec)\n+           r := &eskip.Route{Filters: []*eskip.Filter{{Name: spec.Name(), Args: args}}, Backend: backend.URL}\n+\n+           proxy := proxytest.New(fr, r)\n+           defer proxy.Close()\n+\n+           reqURL, err := url.Parse(proxy.URL)\n+           if err != nil {\n+               t.Errorf(\"Failed to parse url %s: %v\", proxy.URL, err)\n+               return\n+           }\n+\n+           req, err := http.NewRequest(\"GET\", reqURL.String(), nil)\n+           if err != nil {\n+               t.Errorf(\"failed to create request %v\", err)\n+               return\n+           }\n+           req.Header.Set(authHeaderName, authHeaderPrefix+ti.token)\n+\n+           rsp, err := http.DefaultClient.Do(req)\n+           if err != nil {\n+               t.Errorf(\"failed to get response: %v\", err)\n+               return\n+           }\n+           defer rsp.Body.Close()\n+\n+           buf := make([]byte, 
128)\n+           var n int\n+           if n, err = rsp.Body.Read(buf); err != nil && err != io.EOF {\n+               t.Errorf(\"Could not read response body: %v\", err)\n+               return\n+           }\n+\n+           t.Logf(\"%d %d\", rsp.StatusCode, ti.expected)\n+           if rsp.StatusCode != ti.expected {\n+               t.Errorf(\"unexpected status code: %v != %v %d %s\", rsp.StatusCode, ti.expected, n, buf)\n+               return\n+           }\n+       })\n+   }\n+}\n
    "},{"location":"reference/development/#using-a-debugger","title":"Using a debugger","text":"

    Skipper supports plugins and to offer this support it uses the plugin library. Due to a bug in the Go compiler as reported here a debugger cannot be used. This issue will be fixed in Go 1.12 but until then the only workaround is to remove references to the plugin library. The following patch can be used for debugging.

    diff --git a/plugins.go b/plugins.go\nindex 837b6cf..aa69f09 100644\n--- a/plugins.go\n+++ b/plugins.go\n@@ -1,5 +1,6 @@\n package skipper\n\n+/*\n import (\n    \"fmt\"\n    \"os\"\n@@ -13,8 +14,13 @@ import (\n    \"github.com/zalando/skipper/filters\"\n    \"github.com/zalando/skipper/routing\"\n )\n+*/\n\n func (o *Options) findAndLoadPlugins() error {\n+   return nil\n+}\n+\n+/*\n    found := make(map[string]string)\n    done := make(map[string][]string)\n\n@@ -366,3 +372,4 @@ func readPluginConfig(plugin string) (conf []string, err error) {\n    }\n    return conf, nil\n }\n+*/\n

    The patch can be applied with the git apply $PATCH_FILE command. Please do not commit the modified plugins.go along with your changes.

    "},{"location":"reference/egress/","title":"Egress Proxy","text":"

    Disclaimer: Egress features are probably not feature complete. Please create Github Issues to show your ideas about this topic.

    The picture below shows an authentication use case with Bearer token injection, to show the egress traffic flow:

    Skipper has some features, which are egress specific. Some features, for example dropRequestHeader or ratelimit, might also be used, but are not listed here:

    • circuit breaker filters
    • consecutiveBreaker
    • rateBreaker
    • disableBreaker
    • bearerinjector filter, that injects tokens for an app
    • The secrets module that does
    • automated secrets rotation read from files used by bearerinjector filter
    • dynamic secrets lookup used by bearerinjector filter
    • encryption and decryption used by OpenID Connect filters
    "},{"location":"reference/egress/#secrets-module","title":"Secrets Module","text":"

    Disclaimer: the specified features might be changed to make use cases work in the future.

    "},{"location":"reference/egress/#automated-secrets-rotation","title":"Automated Secrets rotation","text":"

    Secrets are read from files. Files can be rewritten by third party tools to integrate whatever provider you want. In Kubernetes you can write Secrets with an API and read them from rotated, mounted files in skipper, for example.

    To specify files or directories to find secrets, you can use -credentials-paths command line flag. Filenames are used to define the name of the secret, which will be used as a lookup key.

    The files need to be created before skipper is started, and as of today skipper doesn\u2019t find new files automatically. This might change in the future.

    To change the default update interval, which defaults to 10m, you can use the -credentials-update-interval command line flag.

    "},{"location":"reference/egress/#example-bearer-injection","title":"Example bearer injection","text":"

    Create file /tmp/secrets/mytoken, that contains mytoken:

    mkdir /tmp/secrets; echo mytoken >/tmp/secrets/mytoken\n

    start fake service

    nc -l 8080\n

    start skipper proxy

    skipper -inline-routes='Host(\"host1\") -> bearerinjector(\"/tmp/secrets/mytoken\") -> \"http://127.0.0.1:8080/\"' -credentials-paths=/tmp/secrets -credentials-update-interval=10s\n..\n[APP]INFO[0004] Updated secret file: /tmp/secrets/mytoken\n..\n

    Client calls skipper proxy

    % curl -H\"Host: host1\" localhost:9090/foo\n^C\n

    fake service shows

    GET /foo HTTP/1.1\nHost: 127.0.0.1:8080\nUser-Agent: curl/7.49.0\nAccept: */*\nAuthorization: Bearer mytoken\nAccept-Encoding: gzip\n

    Change the secret: echo changedtoken >/tmp/secrets/mytoken. Wait until skipper logs: [APP]INFO[0010] update secret file: /tmp/secrets/mytoken

    Restart fake service (CTRL-c to stop)

    nc -l 8080\n

    Client calls skipper proxy retry:

    % curl -H\"Host: host1\" localhost:9090/foo\n^C\n

    fake service shows

    GET /foo HTTP/1.1\nHost: 127.0.0.1:8080\nUser-Agent: curl/7.49.0\nAccept: */*\nAuthorization: Bearer changedtoken\nAccept-Encoding: gzip\n

    This example showed bearer injection with secrets rotation.

    "},{"location":"reference/egress/#reach-multiple-services","title":"Reach multiple services","text":"

    Often your service wants to reach multiple services, so you need to differentiate these routes, somehow.

    For example your service needs to access a.example.com and b.example.com.

    One example is to use .localhost domain, so a.localhost and b.localhost in your application and in skipper routes you would have:

    a: Host(\"a.localhost\") -> bearerinjector(\"/tmp/secrets/mytoken\") -> \"https://a.example.com\"\nb: Host(\"b.localhost\") -> bearerinjector(\"/tmp/secrets/mytoken\") -> \"https://b.example.com\"\n

    You can also use host aliases, in Linux /etc/hosts, or in Kubernetes hostAliases:

    Pod spec:

    spec:\n  hostAliases:\n  - ip: 127.0.0.1\n    hostnames:\n    - a.local\n    - b.local\n
    "},{"location":"reference/egress/#future-todos","title":"Future - TODOs","text":"

    We want to experiment in how to best use skipper as egress proxy. One idea is to implement forward proxy via HTTP CONNECT and being able to use the routing to inject the right Authorization headers with the bearerinjector filter, for example.

    If you have ideas please add your thoughts in one of the issues, that match your idea or create a new one.

    "},{"location":"reference/filters/","title":"Skipper Filters","text":"

    The parameters can be strings, regex or float64 / int

    • string is a string surrounded by double quotes (\")
    • regex is a regular expression, surrounded by /, e.g. /^www\\.example\\.org(:\\d+)?$/
    • int / float64 are usual (decimal) numbers like 401 or 1.23456
    • time is a string in double quotes, parseable by time.Duration

    Filters are a generic tool and can change HTTP header and body in the request and response path. Filter can be chained using the arrow operator ->.

    Example route with a match all, 2 filters and a backend:

    all: * -> filter1 -> filter2 -> \"http://127.0.0.1:1234/\";\n
    "},{"location":"reference/filters/#template-placeholders","title":"Template placeholders","text":"

    Several filters support template placeholders (${var}) in string parameters.

    Template placeholder is replaced by the value that is looked up in the following sources:

    • request method (${request.method})
    • request host (${request.host})
    • request url path (${request.path})
    • request url rawquery (${request.rawQuery}, encoded request URL query without ?, e.g. q=foo&r=bar)
    • request url query (if starts with request.query. prefix, e.g ${request.query.q} is replaced by q query parameter value)
    • request headers (if starts with request.header. prefix, e.g ${request.header.Content-Type} is replaced by Content-Type request header value)
    • request cookies (if starts with request.cookie. prefix, e.g ${request.cookie.PHPSESSID} is replaced by PHPSESSID request cookie value)
    • request IP address
      • ${request.source} - first IP address from X-Forwarded-For header or request remote IP address if header is absent, similar to Source predicate
      • ${request.sourceFromLast} - last IP address from X-Forwarded-For header or request remote IP address if header is absent, similar to SourceFromLast predicate
      • ${request.clientIP} - request remote IP address similar to ClientIP predicate
    • response headers (if starts with response.header. prefix, e.g ${response.header.Location} is replaced by Location response header value)
    • filter context path parameters (e.g. ${id} is replaced by id path parameter value)

    Missing value interpretation depends on the filter.

    Example route that rewrites path using template placeholder:

    u1: Path(\"/user/:id\") -> setPath(\"/v2/user/${id}\") -> <loopback>;\n

    Example route that creates header from query parameter:

    r: Path(\"/redirect\") && QueryParam(\"to\") -> status(303) -> setResponseHeader(\"Location\", \"${request.query.to}\") -> <shunt>;\n

    "},{"location":"reference/filters/#status","title":"status","text":"

    Sets the response status code to the given value, with no regards to the backend response.

    Parameters:

    • status code (int)

    Example:

    route1: Host(/^all401\\.example\\.org$/) -> status(401) -> <shunt>;\n
    "},{"location":"reference/filters/#comment","title":"comment","text":"

    No operation, only to comment the route or a group of filters of a route

    Parameters:

    • msg (string)

    Example:

    route1: *\n    -> comment(\"nothing to see\")\n    -> <shunt>;\n
    "},{"location":"reference/filters/#annotate","title":"annotate","text":"

    Annotate the route, subsequent annotations using the same key will overwrite the value. Other subsequent filters can use annotations to make decisions and should document the key and value they use.

    Parameters:

    • key (string)
    • value (string)

    Example:

    route1: *\n    -> annotate(\"never\", \"gonna give you up\")\n    -> annotate(\"never\", \"gonna let you down\")\n    -> <shunt>;\n
    "},{"location":"reference/filters/#http-headers","title":"HTTP Headers","text":""},{"location":"reference/filters/#preservehost","title":"preserveHost","text":"

    Sets the incoming Host: header on the outgoing backend connection.

    It can be used to override the proxyPreserveHost behavior for individual routes.

    Parameters: \u201ctrue\u201d or \u201cfalse\u201d

    • \u201ctrue\u201d - use the Host header from the incoming request
    • \u201cfalse\u201d - use the host from the backend address

    Example:

    route1: * -> preserveHost(\"true\") -> \"http://backend.example.org\";\n

    "},{"location":"reference/filters/#modrequestheader","title":"modRequestHeader","text":"

    Replace all matched regex expressions in the given header.

    Parameters:

    • header name (string)
    • the expression to match (regex)
    • the replacement (string)

    Example:

    enforce_www: * -> modRequestHeader(\"Host\", \"^zalando\\.(\\w+)$\", \"www.zalando.$1\") -> redirectTo(301);\n
    "},{"location":"reference/filters/#setrequestheader","title":"setRequestHeader","text":"

    Set headers for requests.

    Header value may contain template placeholders. If a template placeholder can\u2019t be resolved then filter does not set the header.

    Parameters:

    • header name (string)
    • header value (string)

    Examples:

    foo: * -> setRequestHeader(\"X-Passed-Skipper\", \"true\") -> \"https://backend.example.org\";\n
    // Ratelimit per resource\nPath(\"/resource/:id\") -> setRequestHeader(\"X-Resource-Id\", \"${id}\") -> clusterClientRatelimit(\"resource\", 10, \"1m\", \"X-Resource-Id\") -> \"https://backend.example.org\";\n
    "},{"location":"reference/filters/#appendrequestheader","title":"appendRequestHeader","text":"

    Same as setRequestHeader, but appends the provided value to the already existing ones.

    "},{"location":"reference/filters/#droprequestheader","title":"dropRequestHeader","text":"

    Removes a header from the request

    Parameters:

    • header name (string)

    Example:

    foo: * -> dropRequestHeader(\"User-Agent\") -> \"https://backend.example.org\";\n
    "},{"location":"reference/filters/#modresponseheader","title":"modResponseHeader","text":"

    Same as modRequestHeader, only for responses

    Parameters:

    • header name (string)
    • the expression to match (regex)
    • the replacement (string)

    Example:

    do_not_avoid_caching: * -> modResponseHeader(\"cache-control\", \"no-cache\", \"cache\") -> \"https://zalando.de\";\n
    "},{"location":"reference/filters/#setresponseheader","title":"setResponseHeader","text":"

    Same as setRequestHeader, only for responses

    Example:

    set_cookie_with_path_param:\n  Path(\"/path/:id\") && Method(\"GET\")\n  -> setResponseHeader(\"Set-Cookie\", \"cid=${id}; Max-Age=36000; Secure\")\n  -> redirectTo(302, \"/\")\n  -> <shunt>\n
    "},{"location":"reference/filters/#appendresponseheader","title":"appendResponseHeader","text":"

    Same as appendRequestHeader, only for responses

    "},{"location":"reference/filters/#dropresponseheader","title":"dropResponseHeader","text":"

    Same as dropRequestHeader but for responses from the backend

    "},{"location":"reference/filters/#setcontextrequestheader","title":"setContextRequestHeader","text":"

    Set headers for requests using values from the filter context (state bag). If the provided key (second parameter) cannot be found in the state bag, then it doesn\u2019t set the header.

    Parameters:

    • header name (string)
    • key in the state bag (string)

    The route in the following example checks whether the request is authorized with the oauthTokeninfoAllScope() filter. This filter stores the authenticated user with \u201cauth-user\u201d key in the context, and the setContextRequestHeader() filter in the next step stores it in the header of the outgoing request with the X-Uid name:

    foo: * -> oauthTokeninfoAllScope(\"address_service.all\") -> setContextRequestHeader(\"X-Uid\", \"auth-user\") -> \"https://backend.example.org\";\n
    "},{"location":"reference/filters/#appendcontextrequestheader","title":"appendContextRequestHeader","text":"

    Same as setContextRequestHeader, but appends the provided value to the already existing ones.

    "},{"location":"reference/filters/#setcontextresponseheader","title":"setContextResponseHeader","text":"

    Same as setContextRequestHeader, except for responses.

    "},{"location":"reference/filters/#appendcontextresponseheader","title":"appendContextResponseHeader","text":"

    Same as appendContextRequestHeader, except for responses.

    "},{"location":"reference/filters/#copyrequestheader","title":"copyRequestHeader","text":"

    Copies value of a given request header to another header.

    Parameters:

    • source header name (string)
    • target header name (string)

    Example:

    foo: * -> copyRequestHeader(\"X-Foo\", \"X-Bar\") -> \"https://backend.example.org\";\n
    "},{"location":"reference/filters/#copyresponseheader","title":"copyResponseHeader","text":"

    Same as copyRequestHeader, except for responses.

    "},{"location":"reference/filters/#corsorigin","title":"corsOrigin","text":"

    The filter accepts an optional variadic list of acceptable origin parameters. If the input argument list is empty, the header will always be set to * which means any origin is acceptable. Otherwise the header is only set if the request contains an Origin header and its value matches one of the elements in the input list. The header is only set on the response.

    Parameters:

    • url (variadic string)

    Examples:

    corsOrigin()\ncorsOrigin(\"https://www.example.org\")\ncorsOrigin(\"https://www.example.org\", \"http://localhost:9001\")\n
    "},{"location":"reference/filters/#headertoquery","title":"headerToQuery","text":"

    Filter which assigns the value of a given header from the incoming Request to a given query param

    Parameters:

    • The name of the header to pick from request
    • The name of the query param key to add to request

    Examples:

    headerToQuery(\"X-Foo-Header\", \"foo-query-param\")\n

    The above filter will set the foo-query-param query param to the value of the X-Foo-Header header and will override the value if the query param already exists

    "},{"location":"reference/filters/#flowid","title":"flowId","text":"

    Sets an X-Flow-Id header, if it\u2019s not already in the request. This allows you to have a trace in your logs, that traces from the incoming request on the edge to all backend services.

    Flow IDs must be in a certain format to be reusable in skipper. Valid formats depend on the generator used in skipper. Default generator creates IDs of length 16 matching the following regex: ^[0-9a-zA-Z+-]+$

    Parameters:

    • no parameter: resets always the X-Flow-Id header to a new value
    • \"reuse\": only create X-Flow-Id header if not already set or if the value is invalid in the request

    Example:

    * -> flowId() -> \"https://some-backend.example.org\";\n* -> flowId(\"reuse\") -> \"https://some-backend.example.org\";\n
    "},{"location":"reference/filters/#xforward","title":"xforward","text":"

    Standard proxy headers. Appends the client remote IP to the X-Forwarded-For and sets the X-Forwarded-Host header.

    "},{"location":"reference/filters/#xforwardfirst","title":"xforwardFirst","text":"

    Same as xforward, but instead of appending the last remote IP, it prepends it to comply with the approach of certain LB implementations.

    "},{"location":"reference/filters/#http-path","title":"HTTP Path","text":""},{"location":"reference/filters/#modpath","title":"modPath","text":"

    Replace all matched regex expressions in the path.

    Parameters:

    • the expression to match (regex)
    • the replacement (string)

    Example:

    rm_api: Path(\"/api\") -> modPath(\"/api\", \"/\") -> \"https://backend.example.org\";\nappend_bar: Path(\"/foo\") -> modPath(\"/foo\", \"/foo/bar\") -> \"https://backend.example.org\";\nnew_base: PathSubtree(\"/base\") -> modPath(\"/base\", \"/new/base\") -> \"https://backend.example.org\";\nrm_api_regex: Path(\"/api\") -> modPath(\"^/api/(.*)/v2$\", \"/$1\") -> \"https://backend.example.org\";\n
    "},{"location":"reference/filters/#setpath","title":"setPath","text":"

    Replace the path of the original request to the replacement.

    Parameters:

    • the replacement (string)

    The replacement may contain template placeholders. If a template placeholder can\u2019t be resolved then empty value is used for it.

    "},{"location":"reference/filters/#http-redirect","title":"HTTP Redirect","text":""},{"location":"reference/filters/#redirectto","title":"redirectTo","text":"

    Creates an HTTP redirect response.

    Parameters:

    • redirect status code (int)
    • location (string) - optional

    Example:

    redirect1: PathRegexp(/^\\/foo\\/bar/) -> redirectTo(302, \"/foo/newBar\") -> <shunt>;\nredirect2: * -> redirectTo(301) -> <shunt>;\n
    • Route redirect1 will do a redirect with status code 302 to https with new path /foo/newBar for requests, that match the path /foo/bar.
    • Route redirect2 will do a https redirect with status code 301 for all incoming requests that match no other route

    see also redirect-handling

    "},{"location":"reference/filters/#redirecttolower","title":"redirectToLower","text":"

    Same as redirectTo, but replaces all strings to lower case.

    "},{"location":"reference/filters/#http-query","title":"HTTP Query","text":""},{"location":"reference/filters/#stripquery","title":"stripQuery","text":"

    Removes the query parameter from the request URL, and if the first filter parameter is \"true\", preserves the query parameter in the form of x-query-param-<queryParamName>: <queryParamValue> headers, so that ?foo=bar becomes x-query-param-foo: bar

    Example:

    * -> stripQuery() -> \"http://backend.example.org\";\n* -> stripQuery(\"true\") -> \"http://backend.example.org\";\n

    "},{"location":"reference/filters/#setquery","title":"setQuery","text":"

    Set the query string ?k=v in the request to the backend to a given value.

    Parameters:

    • key (string)
    • value (string)

    Key and value may contain template placeholders. If a template placeholder can\u2019t be resolved then empty value is used for it.

    Example:

    setQuery(\"k\", \"v\")\n
    "},{"location":"reference/filters/#dropquery","title":"dropQuery","text":"

    Delete the query string ?k=v in the request to the backend for a given key.

    Parameters:

    • key (string)

    Key may contain template placeholders. If a template placeholder can\u2019t be resolved then empty value is used for it.

    Example:

    dropQuery(\"k\")\n
    "},{"location":"reference/filters/#querytoheader","title":"queryToHeader","text":"

    Filter which assigns the value of a given query param from the incoming Request to a given Header with optional format string value.

    Parameters:

    • The name of the query param key to pick from request
    • The name of the header to add to request
    • The format string used to create the header value, which gets the value from the query value as before

    Examples:

    queryToHeader(\"foo-query-param\", \"X-Foo-Header\")\nqueryToHeader(\"access_token\", \"Authorization\", \"Bearer %s\")\n

    The first filter will set X-Foo-Header header respectively to the foo-query-param query param and will not override the value if the header exists already.

    The second filter will set Authorization header to the access_token query param with a prefix value Bearer and will not override the value if the header exists already.

    "},{"location":"reference/filters/#tls","title":"TLS","text":"

    Filters that provide access to TLS data of a request.

    "},{"location":"reference/filters/#tlspassclientcertificates","title":"tlsPassClientCertificates","text":"

    This filter copies TLS client certificates encoded as pem into the X-Forwarded-Tls-Client-Cert header. Multiple certificates are separated by ,.

    Example:

    * -> tlsPassClientCertificates() -> \"http://10.2.5.21:8080\";\n
    "},{"location":"reference/filters/#diagnostics","title":"Diagnostics","text":"

    These filters are meant for diagnostic or load testing purposes.

    "},{"location":"reference/filters/#randomcontent","title":"randomContent","text":"

    Generate response with random text of specified length.

    Parameters:

    • length of data (int)

    Example:

    * -> randomContent(42) -> <shunt>;\n
    "},{"location":"reference/filters/#repeatcontent","title":"repeatContent","text":"

    Generate response of specified size from repeated text.

    Parameters:

    • text to repeat (string)
    • size of response in bytes (int)

    Example:

    * -> repeatContent(\"I will not waste chalk. \", 1000) -> <shunt>;\n
    "},{"location":"reference/filters/#repeatcontenthex","title":"repeatContentHex","text":"

    Generate response of specified size from repeated bytes.

    Parameters:

    • bytes to repeat (hexadecimal string)
    • size of response in bytes (int)

    Example:

    * -> repeatContentHex(\"00\", 100) -> <shunt>\n
    // Create binary response using size equal to the number of bytes to repeat, i.e. repeat once\n* -> repeatContentHex(\"68657861646563696d616c\", 11) -> <shunt>\n
    "},{"location":"reference/filters/#wrapcontent","title":"wrapContent","text":"

    Add prefix and suffix to the response.

    Parameters:

    • prefix (string)
    • suffix (string)

    Examples:

    * -> wrapContent(\"foo\", \"baz\") -> inlineContent(\"bar\") -> <shunt>\n
    // JSON array of 100 zeros\n* -> wrapContent(\"[\", \"0]\") -> repeatContent(\"0, \", 297) -> <shunt>\n
    "},{"location":"reference/filters/#wrapcontenthex","title":"wrapContentHex","text":"

    Add prefix and suffix to the response.

    Parameters:

    • prefix (hexadecimal string)
    • suffix (hexadecimal string)

    Examples:

    * -> wrapContentHex(\"68657861\", \"6d616c\") -> inlineContent(\"deci\") -> <shunt>\n
    // 1G of gzip-compressed text\n*\n-> setResponseHeader(\"Content-Encoding\", \"gzip\")\n-> wrapContentHex(\n  \"1f8b08000000000004ffecd6b10d00200804c05598c5b80a852d0ee422762ce61c2657d212f8bf9915bb6f9f8c51b9c26c1feec13fc80379a80ff4210ff0000ff0000ff0000ff0000ff0000ff0000ff0000ff0000ff0000ff0000ff0000ffce781070000ffffecd6810c000000c0207feb737c8ba2f8cd6f7ef39bdffce637bf\",\n  \"7dc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077ce0ff81000000ffffecd6810000000080207feb418ea278ce739ef39ce73ce739cf7de0f581000000ffff010000ffff5216994600ca9a3b\"\n)\n-> repeatContentHex(\"7dc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077cc0077ce0ff81000000ffffecd6810c000000c0207feb737c8ba278ce739ef39ce73ce739cf\", 8300624)\n-> <shunt>\n

    You may use https://github.com/AlexanderYastrebov/unrepeat to decompose binary file into prefix, repeating content and suffix.

    "},{"location":"reference/filters/#latency","title":"latency","text":"

    Enable adding artificial latency

    Parameters:

    • latency in milliseconds (int) or in time as a string in double quotes, parseable by time.Duration

    Example:

    * -> latency(120) -> \"https://www.example.org\";\n* -> latency(\"120ms\") -> \"https://www.example.org\";\n
    "},{"location":"reference/filters/#bandwidth","title":"bandwidth","text":"

    Enable bandwidth throttling.

    Parameters:

    • bandwidth in kb/s (int)

    Example:

    * -> bandwidth(30) -> \"https://www.example.org\";\n
    "},{"location":"reference/filters/#chunks","title":"chunks","text":"

    Enables adding chunking responses with custom chunk size with artificial delays in between response chunks. To disable delays, set the second parameter to \u201c0\u201d.

    Parameters:

    • byte length (int)
    • time duration (time.Duration)

    Example:

    * -> chunks(1024, \"120ms\") -> \"https://www.example.org\";\n* -> chunks(1024, \"0\") -> \"https://www.example.org\";\n
    "},{"location":"reference/filters/#backendlatency","title":"backendLatency","text":"

    Same as latency filter, but on the request path and not on the response path.

    "},{"location":"reference/filters/#backendbandwidth","title":"backendBandwidth","text":"

    Same as bandwidth filter, but on the request path and not on the response path.

    "},{"location":"reference/filters/#backendchunks","title":"backendChunks","text":"

    Same as chunks filter, but on the request path and not on the response path.

    "},{"location":"reference/filters/#tarpit","title":"tarpit","text":"

    The tarpit filter discards the request and respond with a never ending stream of chunked response payloads. The goal is to consume the client connection without letting the client know what is happening.

    Parameters:

    • time duration (time.Duration)

    Example:

    * -> tarpit(\"1s\") -> <shunt>;\n

    The example will send every second a chunk of response payload.

    "},{"location":"reference/filters/#absorb","title":"absorb","text":"

    The absorb filter reads and discards the payload of the incoming requests. It logs with INFO level and a unique ID per request:

    • the event of receiving the request
    • partial and final events for consuming request payload and total consumed byte count
    • the finishing event of the request
    • any read errors other than EOF
    "},{"location":"reference/filters/#absorbsilent","title":"absorbSilent","text":"

    The absorbSilent filter reads and discards the payload of the incoming requests. It only logs read errors other than EOF.

    "},{"location":"reference/filters/#uniformrequestlatency","title":"uniformRequestLatency","text":"

    The uniformRequestLatency filter introduces uniformly distributed jitter latency within [mean-delta, mean+delta] interval for requests. The first parameter is the mean and the second is delta. In the example we would sleep for 100ms+/-10ms.

    Example:

    * -> uniformRequestLatency(\"100ms\", \"10ms\") -> \"https://www.example.org\";\n
    "},{"location":"reference/filters/#normalrequestlatency","title":"normalRequestLatency","text":"

    The normalRequestLatency filter introduces normally distributed jitter latency with configured mean value for requests. The first parameter is \u00b5 (mean) and the second is \u03c3 as in https://en.wikipedia.org/wiki/Normal_distribution.

    Example:

    * -> normalRequestLatency(\"10ms\", \"5ms\") -> \"https://www.example.org\";\n
    "},{"location":"reference/filters/#histogramrequestlatency","title":"histogramRequestLatency","text":"

    The histogramRequestLatency adds latency to requests according to the histogram distribution. It expects a list of interleaved duration strings and numbers that defines a histogram. Duration strings define boundaries of consecutive buckets and numbers define bucket weights. The filter randomly selects a bucket with probability equal to its weight divided by the sum of all bucket weights (which must be non-zero) and then sleeps for a random duration in between bucket boundaries.

    Example:

    r: * -> histogramRequestLatency(\"0ms\", 50, \"5ms\", 0, \"10ms\", 30, \"15ms\", 20, \"20ms\") -> \"https://www.example.org\";\n

    The example above adds a latency between 0ms and 5ms to 50% of the requests, between 5ms and 10ms to 0% of the requests, between 10ms and 15ms to 30% of the requests, and between 15ms and 20ms to 20% of the requests.

    "},{"location":"reference/filters/#uniformresponselatency","title":"uniformResponseLatency","text":"

    The uniformResponseLatency filter introduces uniformly distributed jitter latency within [mean-delta, mean+delta] interval for responses. The first parameter is the mean and the second is delta. In the example we would sleep for 100ms+/-10ms.

    Example:

    * -> uniformResponseLatency(\"100ms\", \"10ms\") -> \"https://www.example.org\";\n
    "},{"location":"reference/filters/#normalresponselatency","title":"normalResponseLatency","text":"

    The normalResponseLatency filter introduces normally distributed jitter latency with configured mean value for responses. The first parameter is \u00b5 (mean) and the second is \u03c3 as in https://en.wikipedia.org/wiki/Normal_distribution.

    Example:

    * -> normalResponseLatency(\"10ms\", \"5ms\") -> \"https://www.example.org\";\n
    "},{"location":"reference/filters/#histogramresponselatency","title":"histogramResponseLatency","text":"

    The histogramResponseLatency adds latency to responses according to the histogram distribution, similar to histogramRequestLatency.

    "},{"location":"reference/filters/#logheader","title":"logHeader","text":"

    The logHeader filter prints the request line and the header, but not the body, to stderr. Note that this filter should be used only in diagnostics setup and with care, since the request headers may contain sensitive data, and they also can explode the amount of logs. Authorization headers will be truncated in request and response header logs. You can log request or response headers, which defaults for backwards compatibility to request headers.

    Parameters:

    • no arg, similar to: \u201crequest\u201d
    • \u201crequest\u201d or \u201cresponse\u201d (string varargs)

    Example:

    * -> logHeader() -> \"https://www.example.org\";\n* -> logHeader(\"request\") -> \"https://www.example.org\";\n* -> logHeader(\"response\") -> \"https://www.example.org\";\n* -> logHeader(\"request\", \"response\") -> \"https://www.example.org\";\n
    "},{"location":"reference/filters/#logbody","title":"logBody","text":"

    The logBody filter logs the request or response body in chunks while streaming. Chunks start with logBody(\"request\") $flowid: or logBody(\"response\") $flowid:, such that you can find all chunks belonging to a given flow. See also flowId() filter.

    Note that this filter should be used only in diagnostics setup and with care, since the request and response body may contain sensitive data. Logs can also explode in the amount of bytes, so you have to choose a limit. You can log request or response bodies. This filter has close to no overhead other than the I/O created by the logger.

    Parameters:

    • type: \u201crequest\u201d or \u201cresponse\u201d (string)
    • limit: maximum number of bytes to log (int)

    Example:

    * -> logBody(\"request\", 1024) -> \"https://www.example.org\";\n* -> logBody(\"response\", 1024) -> \"https://www.example.org\";\n* -> logBody(\"request\", 1024) -> logBody(\"response\", 1024) -> \"https://www.example.org\";\n
    "},{"location":"reference/filters/#timeout","title":"Timeout","text":""},{"location":"reference/filters/#backendtimeout","title":"backendTimeout","text":"

    Configure backend timeout. Skipper responds with 504 Gateway Timeout status if obtaining a connection, sending the request, and reading the backend response headers and body takes longer than the configured timeout. However, if response streaming has already started it will be terminated, i.e. client will receive backend response status and truncated response body.

    Parameters:

    • timeout (duration string)

    Example:

    * -> backendTimeout(\"10ms\") -> \"https://www.example.org\";\n
    "},{"location":"reference/filters/#readtimeout","title":"readTimeout","text":"

    Configuring the read timeout sets a read deadline on the server socket connected to the client connecting to the proxy. Skipper will log 499 client timeout with context canceled. We are not able to differentiate between a client hang-up and a read timeout.

    Parameters:

    • timeout (duration string)

    Example:

    * -> readTimeout(\"10ms\") -> \"https://www.example.org\";\n
    "},{"location":"reference/filters/#writetimeout","title":"writeTimeout","text":"

    Configuring the write timeout sets a write deadline on the server socket connected to the client connecting to the proxy. Skipper will show access logs as if the response was served as expected, but the client can show an error. You can observe an increase in streaming errors via metrics, or in an opentracing proxy span you can see Tag streamBody.byte with value streamBody error, or in debug logs something like error while copying the response stream: write tcp 127.0.0.1:9090->127.0.0.1:38574: i/o timeout.

    Parameters:

    • timeout (duration string)

    Example:

    * -> writeTimeout(\"10ms\") -> \"https://www.example.org\";\n
    "},{"location":"reference/filters/#shadow-traffic","title":"Shadow Traffic","text":""},{"location":"reference/filters/#tee","title":"tee","text":"

    Provides a unix-like tee feature for routing.

    Using this filter, the request will be sent to a \u201cshadow\u201d backend in addition to the main backend of the route.

    Example:

    * -> tee(\"https://audit-logging.example.org\") -> \"https://foo.example.org\";\n

    This will send an identical request for foo.example.org to audit-logging.example.org. Another use case could be using it for benchmarking a new backend with some real traffic. This we call \u201cshadow traffic\u201d.

    The above route will forward the request to https://foo.example.org as it normally would do, but in addition to that, it will send an identical request to https://audit-logging.example.org. The request sent to https://audit-logging.example.org will receive the same method and headers, and a copy of the body stream. The tee response is ignored for this shadow backend.

    It is possible to change the path of the tee request, in a similar way to the modPath filter:

    Path(\"/api/v1\") -> tee(\"https://api.example.org\", \"^/v1\", \"/v2\" ) -> \"http://api.example.org\";\n

    In the above example, one can test how a new version of an API would behave on incoming requests.

    "},{"location":"reference/filters/#teenf","title":"teenf","text":"

    The same as tee filter, but does not follow redirects from the backend.

    "},{"location":"reference/filters/#teeloopback","title":"teeLoopback","text":"

    This filter provides a unix-like tee feature for routing, but unlike the tee, this filter feeds the copied request to the start of the routing, including the route lookup and executing the filters on the matched route.

    It is recommended to use this solution instead of the tee filter, because the same routing facilities are used for the outgoing tee requests as for the normal requests, and all the filters and backend types are supported.

    To ensure that the right route, or one of the right set of routes, is matched after the loopback, use the filter together with the Tee predicate, however, this is not mandatory if the request is changed via other filters, such that other predicates ensure matching the right route. To avoid infinite looping, the number of requests spawn from a single incoming request is limited similarly as in case of the loopback backend.

    Parameters:

    • tee group (string): a label identifying which routes should match the loopback request, marked with the Tee predicate

    Example, generate shadow traffic from 10% of the production traffic:

    main: * -> \"https://main-backend.example.org\";\nsplit: Traffic(.1) -> teeLoopback(\"test-A\") -> \"https://main-backend.example.org\";\nshadow: Tee(\"test-A\") && True() -> \"https://test-backend.example.org\";\n

    See also:

    • Tee predicate
    • Shadow Traffic Tutorial
    "},{"location":"reference/filters/#http-body","title":"HTTP Body","text":""},{"location":"reference/filters/#compress","title":"compress","text":"

    The filter, when executed on the response path, checks if the response entity can be compressed. To decide, it checks the Content-Encoding, the Cache-Control and the Content-Type headers. It doesn\u2019t compress the content if the Content-Encoding is set to other than identity, or the Cache-Control applies the no-transform pragma, or the Content-Type is set to an unsupported value.

    The default supported content types are: text/plain, text/html, application/json, application/javascript, application/x-javascript, text/javascript, text/css, image/svg+xml, application/octet-stream.

    The default set of MIME types can be reset or extended by passing in the desired types as filter arguments. When extending the defaults, the first argument needs to be \"...\". E.g. to compress tiff in addition to the defaults:

    * -> compress(\"...\", \"image/tiff\") -> \"https://www.example.org\"\n

    To reset the supported types, e.g. to compress only HTML, the \u201c\u2026\u201d argument needs to be omitted:

    * -> compress(\"text/html\") -> \"https://www.example.org\"\n

    It is possible to control the compression level, by setting it as the first filter argument, in front of the MIME types. The default compression level is best-speed. The possible values are integers between 0 and 11 (inclusive), where 0 means no-compression, 1 means best-speed and 11 means best-compression. Example:

    * -> compress(11, \"image/tiff\") -> \"https://www.example.org\"\n

    The filter also checks the incoming request, if it accepts the supported encodings, explicitly stated in the Accept-Encoding header. The filter currently supports by default gzip, deflate and br (can be overridden with flag compress-encodings). It does not assume that the client accepts any encoding if the Accept-Encoding header is not set. It ignores * in the Accept-Encoding header.

    Supported encodings are prioritized on: - quality value provided by client - compress-encodings flag following order as provided if quality value is equal - gzip, deflate, br in this order otherwise

    When compressing the response, it updates the response header. It deletes the Content-Length value triggering the proxy to always return the response with chunked transfer encoding, sets the Content-Encoding to the selected encoding and sets the Vary: Accept-Encoding header, if missing.

    The compression happens in a streaming way, using only a small internal buffer.

    "},{"location":"reference/filters/#decompress","title":"decompress","text":"

    The filter, when executed on the response path, checks if the response entity is compressed by a supported algorithm (gzip, deflate, br). To decide, it checks the Content-Encoding header.

    When compressing the response, it updates the response header. It deletes the Content-Length value triggering the proxy to always return the response with chunked transfer encoding, deletes the Content-Encoding and the Vary headers, if set.

    The decompression happens in a streaming way, using only a small internal buffer.

    Example:

    * -> decompress() -> \"https://www.example.org\"\n
    "},{"location":"reference/filters/#static","title":"static","text":"

    Serves static content from the filesystem.

    Parameters:

    • Request path to strip (string)
    • Target base path in the filesystem (string)

    Example:

    This serves files from /srv/www/dehydrated when requested via /.well-known/acme-challenge/, e.g. the request GET /.well-known/acme-challenge/foo will serve the file /srv/www/dehydrated/foo.

    acme: Host(/./) && Method(\"GET\") && Path(\"/.well-known/acme-challenge/*\")\n    -> static(\"/.well-known/acme-challenge/\", \"/srv/www/dehydrated\") -> <shunt>;\n

    Notes:

    • redirects to the directory when a file index.html exists and it is requested, i.e. GET /foo/index.html redirects to /foo/ which serves then the /foo/index.html
    • serves the content of the index.html when a directory is requested
    • does a simple directory listing of files / directories when no index.html is present
    "},{"location":"reference/filters/#inlinecontent","title":"inlineContent","text":"

    Returns arbitrary content in the HTTP body.

    Parameters:

    • content (string)
    • content type (string) - optional

    Example:

    * -> inlineContent(\"<h1>Hello</h1>\") -> <shunt>\n* -> inlineContent(\"[1,2,3]\", \"application/json\") -> <shunt>\n* -> status(418) -> inlineContent(\"Would you like a cup of tea?\") -> <shunt>\n

    Content type will be automatically detected when not provided using https://mimesniff.spec.whatwg.org/#rules-for-identifying-an-unknown-mime-type algorithm. Note that content detection algorithm does not contain any rules for recognizing JSON.

    Note

    inlineContent filter sets the response on request path and starts the response path immediately. The rest of the filter chain and backend are ignored and therefore inlineContent filter must be the last in the chain.

    "},{"location":"reference/filters/#inlinecontentifstatus","title":"inlineContentIfStatus","text":"

    Returns arbitrary content in the HTTP body, if the response has the specified status code.

    Parameters:

    • status code (int)
    • content (string)
    • content type (string) - optional

    Example:

    * -> inlineContentIfStatus(404, \"<p class=\\\"problem\\\">We don't have what you're looking for.</p>\") -> \"https://www.example.org\"\n* -> inlineContentIfStatus(401, \"{\\\"error\\\": \\\"unauthorized\\\"}\", \"application/json\") -> \"https://www.example.org\"\n

    The content type will be automatically detected when not provided.

    "},{"location":"reference/filters/#blockcontent","title":"blockContent","text":"

    Block a request based on its body content.

    The filter max buffer size is 2MiB by default and can be overridden with -max-matcher-buffer-size=<int>.

    Parameters:

    • toblockList (List of strings)

    Example:

    * -> blockContent(\"Malicious Content\") -> \"http://example.com\";\n
    "},{"location":"reference/filters/#blockcontenthex","title":"blockContentHex","text":"

    Block a request based on its body content.

    The filter max buffer size is 2MiB by default and can be overridden with -max-matcher-buffer-size=<int>.

    Parameters:

    • toblockList (List of hex string)

    Example:

    * -> blockContentHex(`000a`) -> \"http://example.com\";\n* -> blockContentHex(\"deadbeef\", \"000a\") -> \"http://example.com\";\n
    "},{"location":"reference/filters/#sed","title":"sed","text":"

    The filter sed replaces all occurrences of a pattern with a replacement string in the response body.

    Example:

    editorRoute: * -> sed(\"foo\", \"bar\") -> \"https://www.example.org\";\n

    Example with larger max buffer:

    editorRoute: * -> sed(\"foo\", \"bar\", 64000000) -> \"https://www.example.org\";\n

    This filter expects a regexp pattern and a replacement string as arguments. During the streaming of the response body, every occurrence of the pattern will be replaced with the replacement string. The editing doesn\u2019t happen right when the filter is executed, only later when the streaming normally happens, after all response filters were called.

    The sed() filter accepts two optional arguments, the max editor buffer size in bytes, and max buffer handling flag. The max buffer size, when set, defines how much data can be buffered at a given time by the editor. The default value is 2MiB. The max buffer handling flag can take one of two values: \u201cabort\u201d or \u201cbest-effort\u201d (default). Setting \u201cabort\u201d means that the stream will be aborted when reached the limit. Setting \u201cbest-effort\u201d, will run the replacement on the available content, in case of certain patterns, this may result in content that is different from one that would have been edited in a single piece. See more details below.

    The filter uses the go regular expression implementation: https://github.com/google/re2/wiki/Syntax . Due to the streaming nature, matches with zero length are ignored.

    "},{"location":"reference/filters/#memory-handling-and-limitations","title":"Memory handling and limitations","text":"

    In order to avoid unbound buffering of unprocessed data, the sed* filters need to apply some limitations. Some patterns, e.g. .* would allow to match the complete payload, and it could result in trying to buffer it all and potentially causing running out of available memory. Similarly, in case of certain expressions, when they don\u2019t match, it\u2019s impossible to tell if they would match without reading more data from the source, and so would potentially need to buffer the entire payload.

    To prevent too high memory usage, the max buffer size is limited in case of each variant of the filter, by default to 2MiB, which is the same limit as the one we apply when reading the request headers by default. When the limit is reached, and the buffered content matches the pattern, then it is processed by replacing it, when it doesn\u2019t match the pattern, then it is forwarded unchanged. This way, e.g. sed(\".*\", \"\") can be used safely to consume and discard the payload.

    As a result of this, with large payloads, it is possible that the resulting content will be different than if we had run the replacement on the entire content at once. If we have enough preliminary knowledge about the payload, then it may be better to use the delimited variant of the filters, e.g. for line based editing.

    If the max buffer handling is set to \u201cabort\u201d, then the stream editing is stopped and the rest of the payload is dropped.

    "},{"location":"reference/filters/#seddelim","title":"sedDelim","text":"

    Like sed(), but it expects an additional argument, before the optional max buffer size argument, that is used to delimit chunks to be processed at once. The pattern replacement is executed only within the boundaries of the chunks defined by the delimiter, and matches across the chunk boundaries are not considered.

    Example:

    editorRoute: * -> sedDelim(\"foo\", \"bar\", \"\\n\") -> \"https://www.example.org\";\n
    "},{"location":"reference/filters/#sedrequest","title":"sedRequest","text":"

    Like sed(), but for the request content.

    Example:

    editorRoute: * -> sedRequest(\"foo\", \"bar\") -> \"https://www.example.org\";\n
    "},{"location":"reference/filters/#sedrequestdelim","title":"sedRequestDelim","text":"

    Like sedDelim(), but for the request content.

    Example:

    editorRoute: * -> sedRequestDelim(\"foo\", \"bar\", \"\\n\") -> \"https://www.example.org\";\n
    "},{"location":"reference/filters/#authentication-and-authorization","title":"Authentication and Authorization","text":""},{"location":"reference/filters/#basicauth","title":"basicAuth","text":"

    Enable Basic Authentication

    The filter accepts two parameters, the first mandatory one is the path to the htpasswd file usually used with Apache or nginx. The second one is the optional realm name that will be displayed in the browser. MD5, SHA1 and BCrypt are supported for Basic authentication password storage, see also the http-auth module page.

    Examples:

    basicAuth(\"/path/to/htpasswd\")\nbasicAuth(\"/path/to/htpasswd\", \"My Website\")\n
    "},{"location":"reference/filters/#webhook","title":"webhook","text":"

    The webhook filter makes it possible to have your own authentication and authorization endpoint as a filter.

    Headers from the incoming request will be copied into the request that is being done to the webhook endpoint. It is possible to copy headers from the webhook response into the continuing request by specifying the headers to copy as an optional second argument to the filter.

    Responses from the webhook will be treated as follows:

    • Authorized if the status code is less than 300
    • Forbidden if the status code is 403
    • Unauthorized for remaining status codes

    Examples:

    webhook(\"https://custom-webhook.example.org/auth\")\nwebhook(\"https://custom-webhook.example.org/auth\", \"X-Copy-Webhook-Header,X-Copy-Another-Header\")\n

    The webhook timeout has a default of 2 seconds and can be globally changed, if skipper is started with -webhook-timeout=2s flag.

    "},{"location":"reference/filters/#tokeninfo","title":"Tokeninfo","text":"

    Tokeninfo handled by another service. The filters just validate the response from the tokeninfo service to do authorization as defined in the filter.

    "},{"location":"reference/filters/#oauthtokeninfoanyscope","title":"oauthTokeninfoAnyScope","text":"

    If skipper is started with -oauth2-tokeninfo-url flag, you can use this filter.

    The filter accepts variable number of string arguments, which are used to validate the incoming token from the Authorization: Bearer <token> header. There are two rejection scenarios for this filter. If the token is not successfully validated by the oauth server, then a 401 Unauthorised response will be returned. However, if the token is successfully validated but the required scope match isn\u2019t satisfied, then a 403 Forbidden response will be returned. If any of the configured scopes from the filter is found inside the tokeninfo result for the incoming token, it will allow the request to pass.

    Examples:

    oauthTokeninfoAnyScope(\"s1\", \"s2\", \"s3\")\n
    "},{"location":"reference/filters/#oauthtokeninfoallscope","title":"oauthTokeninfoAllScope","text":"

    If skipper is started with -oauth2-tokeninfo-url flag, you can use this filter.

    The filter accepts variable number of string arguments, which are used to validate the incoming token from the Authorization: Bearer <token> header. There are two rejection scenarios for this filter. If the token is not successfully validated by the oauth server, then a 401 Unauthorised response will be returned. However, if the token is successfully validated but the required scope match isn\u2019t satisfied, then a 403 Forbidden response will be returned. If all of the configured scopes from the filter are found inside the tokeninfo result for the incoming token, it will allow the request to pass.

    Examples:

    oauthTokeninfoAllScope(\"s1\", \"s2\", \"s3\")\n
    "},{"location":"reference/filters/#oauthtokeninfoanykv","title":"oauthTokeninfoAnyKV","text":"

    If skipper is started with -oauth2-tokeninfo-url flag, you can use this filter.

    The filter accepts an even number of variable arguments of type string, which are used to validate the incoming token from the Authorization: Bearer <token> header. There are two rejection scenarios for this filter. If the token is not successfully validated by the oauth server, then a 401 Unauthorised response will be returned. However, if the token is successfully validated but the required scope match isn\u2019t satisfied, then a 403 Forbidden response will be returned. If any of the configured key value pairs from the filter is found inside the tokeninfo result for the incoming token, it will allow the request to pass.

    Examples:

    oauthTokeninfoAnyKV(\"k1\", \"v1\", \"k2\", \"v2\")\noauthTokeninfoAnyKV(\"k1\", \"v1\", \"k1\", \"v2\")\n
    "},{"location":"reference/filters/#oauthtokeninfoallkv","title":"oauthTokeninfoAllKV","text":"

    If skipper is started with -oauth2-tokeninfo-url flag, you can use this filter.

    The filter accepts an even number of variable arguments of type string, which are used to validate the incoming token from the Authorization: Bearer <token> header. There are two rejection scenarios for this filter. If the token is not successfully validated by the oauth server, then a 401 Unauthorised response will be returned. However, if the token is successfully validated but the required scope match isn\u2019t satisfied, then a 403 Forbidden response will be returned. If all of the configured key value pairs from the filter are found inside the tokeninfo result for the incoming token, it will allow the request to pass.

    Examples:

    oauthTokeninfoAllKV(\"k1\", \"v1\", \"k2\", \"v2\")\n
    "},{"location":"reference/filters/#tokenintrospection","title":"Tokenintrospection","text":"

    Tokenintrospection handled by another service. The filters just validate the response from the tokenintrospection service to do authorization as defined in the filter.

    "},{"location":"reference/filters/#oauthtokenintrospectionanyclaims","title":"oauthTokenintrospectionAnyClaims","text":"

    The filter accepts variable number of string arguments, which are used to validate the incoming token from the Authorization: Bearer <token> header. The first argument to the filter is the issuer URL, for example https://accounts.google.com, that will be used as described in RFC Draft to find the configuration and for example supported claims.

    If one of the configured and supported claims from the filter are found inside the tokenintrospection (RFC7662) result for the incoming token, it will allow the request to pass.

    Examples:

    oauthTokenintrospectionAnyClaims(\"https://accounts.google.com\", \"c1\", \"c2\", \"c3\")\n
    "},{"location":"reference/filters/#oauthtokenintrospectionallclaims","title":"oauthTokenintrospectionAllClaims","text":"

    The filter accepts variable number of string arguments, which are used to validate the incoming token from the Authorization: Bearer <token> header. The first argument to the filter is the issuer URL, for example https://accounts.google.com, that will be used as described in RFC Draft to find the configuration and for example supported claims.

    If all of the configured and supported claims from the filter are found inside the tokenintrospection (RFC7662) result for the incoming token, it will allow the request to pass.

    Examples:

    oauthTokenintrospectionAllClaims(\"https://accounts.google.com\", \"c1\", \"c2\", \"c3\")\n
    "},{"location":"reference/filters/#oauthtokenintrospectionanykv","title":"oauthTokenintrospectionAnyKV","text":"

    The filter accepts an even number of variable arguments of type string, which are used to validate the incoming token from the Authorization: Bearer <token> header. The first argument to the filter is the issuer URL, for example https://accounts.google.com, that will be used as described in RFC Draft to find the configuration and for example supported claims.

    If one of the configured key value pairs from the filter are found inside the tokenintrospection (RFC7662) result for the incoming token, it will allow the request to pass.

    Examples:

    oauthTokenintrospectionAnyKV(\"https://accounts.google.com\", \"k1\", \"v1\", \"k2\", \"v2\")\noauthTokenintrospectionAnyKV(\"https://accounts.google.com\", \"k1\", \"v1\", \"k1\", \"v2\")\n
    "},{"location":"reference/filters/#oauthtokenintrospectionallkv","title":"oauthTokenintrospectionAllKV","text":"

    The filter accepts an even number of variable arguments of type string, which are used to validate the incoming token from the Authorization: Bearer <token> header. The first argument to the filter is the issuer URL, for example https://accounts.google.com, that will be used as described in RFC Draft to find the configuration and for example supported claims.

    If all of the configured key value pairs from the filter are found inside the tokenintrospection (RFC7662) result for the incoming token, it will allow the request to pass.

    Examples:

    oauthTokenintrospectionAllKV(\"https://accounts.google.com\", \"k1\", \"v1\", \"k2\", \"v2\")\n
    "},{"location":"reference/filters/#secureoauthtokenintrospectionanyclaims","title":"secureOauthTokenintrospectionAnyClaims","text":"

    The filter accepts variable number of string arguments, which are used to validate the incoming token from the Authorization: Bearer <token> header. The first argument to the filter is the issuer URL, for example https://accounts.google.com, that will be used as described in RFC Draft to find the configuration and for example supported claims.

    Second and third arguments are the client-id and client-secret. Use this filter if the Token Introspection endpoint requires authorization to validate and decode the incoming token. The filter will optionally read client-id and client-secret from environment variables: OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET

    If one of the configured and supported claims from the filter are found inside the tokenintrospection (RFC7662) result for the incoming token, it will allow the request to pass.

    Examples:

    secureOauthTokenintrospectionAnyClaims(\"issuerURL\", \"client-id\", \"client-secret\", \"claim1\", \"claim2\")\n

    Read client-id and client-secret from environment variables

    secureOauthTokenintrospectionAnyClaims(\"issuerURL\", \"\", \"\", \"claim1\", \"claim2\")\n

    "},{"location":"reference/filters/#secureoauthtokenintrospectionallclaims","title":"secureOauthTokenintrospectionAllClaims","text":"

    The filter accepts variable number of string arguments, which are used to validate the incoming token from the Authorization: Bearer <token> header. The first argument to the filter is the issuer URL, for example https://accounts.google.com, that will be used as described in RFC Draft to find the configuration and for example supported claims.

    Second and third arguments are the client-id and client-secret. Use this filter if the Token Introspection endpoint requires authorization to validate and decode the incoming token. The filter will optionally read client-id and client-secret from environment variables: OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET

    If all of the configured and supported claims from the filter are found inside the tokenintrospection (RFC7662) result for the incoming token, it will allow the request to pass.

    Examples:

    secureOauthTokenintrospectionAllClaims(\"issuerURL\", \"client-id\", \"client-secret\", \"claim1\", \"claim2\")\n

    Read client-id and client-secret from environment variables

    secureOauthTokenintrospectionAllClaims(\"issuerURL\", \"\", \"\", \"claim1\", \"claim2\")\n

    "},{"location":"reference/filters/#secureoauthtokenintrospectionanykv","title":"secureOauthTokenintrospectionAnyKV","text":"

    The filter accepts an even number of variable arguments of type string, which are used to validate the incoming token from the Authorization: Bearer <token> header. The first argument to the filter is the issuer URL, for example https://accounts.google.com, that will be used as described in RFC Draft to find the configuration and for example supported claims.

    Second and third arguments are the client-id and client-secret. Use this filter if the Token Introspection endpoint requires authorization to validate and decode the incoming token. The filter will optionally read client-id and client-secret from environment variables: OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET

    If one of the configured key value pairs from the filter are found inside the tokenintrospection (RFC7662) result for the incoming token, it will allow the request to pass.

    Examples:

    secureOauthTokenintrospectionAnyKV(\"issuerURL\", \"client-id\", \"client-secret\", \"k1\", \"v1\", \"k2\", \"v2\")\n

    Read client-id and client-secret from environment variables

    secureOauthTokenintrospectionAnyKV(\"issuerURL\", \"\", \"\", \"k1\", \"v1\", \"k2\", \"v2\")\n

    "},{"location":"reference/filters/#secureoauthtokenintrospectionallkv","title":"secureOauthTokenintrospectionAllKV","text":"

    The filter accepts an even number of variable arguments of type string, which are used to validate the incoming token from the Authorization: Bearer <token> header. The first argument to the filter is the issuer URL, for example https://accounts.google.com, that will be used as described in RFC Draft to find the configuration and for example supported claims.

    Second and third arguments are the client-id and client-secret. Use this filter if the Token Introspection endpoint requires authorization to validate and decode the incoming token. The filter will optionally read client-id and client-secret from environment variables: OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET

    If all of the configured key value pairs from the filter are found inside the tokenintrospection (RFC7662) result for the incoming token, it will allow the request to pass.

    Examples:

    secureOauthTokenintrospectionAllKV(\"issuerURL\", \"client-id\", \"client-secret\", \"k1\", \"v1\", \"k2\", \"v2\")\n

    Read client-id and client-secret from environment variables

    secureOauthTokenintrospectionAllKV(\"issuerURL\", \"\", \"\", \"k1\", \"v1\", \"k2\", \"v2\")\n

    "},{"location":"reference/filters/#jwt","title":"JWT","text":""},{"location":"reference/filters/#jwtvalidation","title":"jwtValidation","text":"

    The filter parses bearer jwt token from Authorization header and validates the signature using public keys discovered via /.well-known/openid-configuration endpoint. Takes issuer url as single parameter. The filter stores token claims into the state bag where they can be used by oidcClaimsQuery() or forwardTokenPart()

    Examples:

    jwtValidation(\"https://login.microsoftonline.com/{tenantId}/v2.0\")\n
    "},{"location":"reference/filters/#jwtmetrics","title":"jwtMetrics","text":"

    This filter is experimental and may change in the future, please see tests for example usage.

    The filter parses (but does not validate) JWT token from Authorization request header on response path and increments the following counters:

    • missing-token: request does not have Authorization header
    • invalid-token-type: Authorization header value is not a Bearer type
    • invalid-token: Authorization header does not contain a JWT token
    • missing-issuer: JWT token does not have iss claim
    • invalid-issuer: JWT token does not have any of the configured issuers

    Each counter name uses concatenation of request method, escaped hostname and response status as a prefix, e.g.:

    jwtMetrics.custom.GET.example_org.200.invalid-token\n

    and therefore requires approximately count(HTTP methods) * count(Hosts) * count(Statuses) * 8 bytes of additional memory.

    The filter does nothing if response status is 4xx or route is opt-out via annotation or state bag value.

    The filter requires single string argument that is parsed as YAML. For convenience use flow style format.

    Examples:

    jwtMetrics(\"{issuers: ['https://example.com', 'https://example.org']}\")\n\n// opt-out by annotation\nannotate(\"oauth.disabled\", \"this endpoint is public\") ->\njwtMetrics(\"{issuers: ['https://example.com', 'https://example.org'], optOutAnnotations: [oauth.disabled]}\")\n\n// opt-out by state bag:\n// oauthTokeninfo* and oauthGrant filters store token info in the state bag using \"tokeninfo\" key.\noauthTokeninfoAnyKV(\"foo\", \"bar\") ->\njwtMetrics(\"{issuers: ['https://example.com', 'https://example.org'], optOutStateBag: [tokeninfo]}\")\n
    "},{"location":"reference/filters/#forward-token-data","title":"Forward Token Data","text":""},{"location":"reference/filters/#forwardtoken","title":"forwardToken","text":"

    The filter takes the header name as its first argument and sets header value to the token info or token introspection result serialized as a JSON object. To include only particular fields provide their names as additional arguments.

    If this filter is used when there is no token introspection or token info data then it does not have any effect.

    Examples:

    forwardToken(\"X-Tokeninfo-Forward\")\nforwardToken(\"X-Tokeninfo-Forward\", \"access_token\", \"token_type\")\n
    "},{"location":"reference/filters/#forwardtokenfield","title":"forwardTokenField","text":"

    The filter takes a header name and a field as its first and second arguments. The corresponding field from the result of token info, token introspection or oidc user info is added as corresponding header when the request is passed to the backend.

    If this filter is used when there is no token introspection, token info or oidc user info data then it does not have any effect.

    To forward multiple fields, filters can be sequenced

    Examples:

    forwardTokenField(\"X-Tokeninfo-Forward-Oid\", \"oid\") -> forwardTokenField(\"X-Tokeninfo-Forward-Sub\", \"sub\")\n
    "},{"location":"reference/filters/#oauth2","title":"OAuth2","text":""},{"location":"reference/filters/#oauthgrant","title":"oauthGrant","text":"

    Enables authentication and authorization with an OAuth2 authorization code grant flow as specified by RFC 6749 Section 1.3.1. Automatically redirects unauthenticated users to log in at their provider\u2019s authorization endpoint. Supports token refreshing and stores access and refresh tokens in an encrypted cookie. Supports credential rotation for the OAuth2 client ID and secret.

    The filter consumes and drops the grant token request cookie to prevent it from leaking to untrusted downstream services.

    The filter will inject the OAuth2 bearer token into the request headers if the flag oauth2-access-token-header-name is set.

    The filter must be used in conjunction with the grantCallback filter where the OAuth2 provider can redirect authenticated users with an authorization code. Skipper will make sure to add the grantCallback filter for you to your routes when you pass the -enable-oauth2-grant-flow flag.

    The filter may be used with the grantClaimsQuery filter to perform authz and access control.

    The filter also supports javascript login redirect stub that can be used e.g. to store location hash. To enable the stub, add preceding annotate filter with oauthGrant.loginRedirectStub key and HTML content that will be served to the client instead of 307 Temporary Redirect to the authorization URL. The filter will replace {{authCodeURL}} placeholder in the content with the actual authorization URL.

    See the tutorial for step-by-step instructions.

    Examples:

    all:\n    *\n    -> oauthGrant()\n    -> \"http://localhost:9090\";\n
    single_page_app:\n    *\n    -> annotate(\"oauthGrant.loginRedirectStub\", `\n          <!doctype html>\n          <html lang=\"en\">\n            <head>\n              <title>Redirecting...</title>\n              <script>\n                if (window.location.hash !== null) {\n                  localStorage.setItem('original-location-hash', window.location.hash);\n                }\n                window.location.replace('{{authCodeURL}}');\n              </script>\n            </head>\n          </html>\n    `)\n    -> oauthGrant()\n    -> \"http://localhost:9090\";\n

    Skipper arguments:

    Argument Required? Description -enable-oauth2-grant-flow yes toggle flag to enable the oauthGrant() filter. Must be set if you use the filter in routes. Example: -enable-oauth2-grant-flow -oauth2-auth-url yes URL of the OAuth2 provider\u2019s authorize endpoint. Example: -oauth2-auth-url=https://identity.example.com/oauth2/authorize -oauth2-token-url yes URL of the OAuth2 provider\u2019s token endpoint. Example: -oauth2-token-url=https://identity.example.com/oauth2/token -oauth2-tokeninfo-url yes URL of the OAuth2 provider\u2019s tokeninfo endpoint. Example: -oauth2-tokeninfo-url=https://identity.example.com/oauth2/tokeninfo -oauth2-secret-file yes path to the file containing the secret for encrypting and decrypting the grant token cookie (the secret can be anything). Example: -oauth2-secret-file=/path/to/secret -oauth2-client-id-file conditional path to the file containing the OAuth2 client ID. Required if you have not set -oauth2-client-id. Example: -oauth2-client-id-file=/path/to/client_id -oauth2-client-secret-file conditional path to the file containing the OAuth2 client secret. Required if you have not set -oauth2-client-secret. Example: -oauth2-client-secret-file=/path/to/client_secret -oauth2-client-id conditional OAuth2 client ID for authenticating with your OAuth2 provider. Required if you have not set -oauth2-client-id-file. Example: -oauth2-client-id=myclientid -oauth2-client-secret conditional OAuth2 client secret for authenticating with your OAuth2 provider. Required if you have not set -oauth2-client-secret-file. Example: -oauth2-client-secret=myclientsecret -credentials-update-interval no the time interval for updating client id and client secret from files. Example: -credentials-update-interval=30s -oauth2-access-token-header-name no the name of the request header where the user\u2019s bearer token should be set. 
Example: -oauth2-access-token-header-name=X-Grant-Authorization -oauth2-grant-tokeninfo-keys no comma separated list of keys to preserve in OAuth2 Grant Flow tokeninfo. Default: empty, preserves all tokeninfo keys. Example: -oauth2-grant-tokeninfo-keys=scope,realm,expires_in -oauth2-auth-url-parameters no any additional URL query parameters to set for the OAuth2 provider\u2019s authorize and token endpoint calls. Example: -oauth2-auth-url-parameters=key1=foo,key2=bar -oauth2-callback-path no path of the Skipper route containing the grantCallback() filter for accepting an authorization code and using it to get an access token. Example: -oauth2-callback-path=/oauth/callback -oauth2-token-cookie-name no the name of the cookie where the access tokens should be stored in encrypted form. Default: oauth-grant. Example: -oauth2-token-cookie-name=SESSION -oauth2-token-cookie-remove-subdomains no the number of subdomains to remove from the callback request hostname to obtain token cookie domain. Default: 1. Example: -oauth2-token-cookie-remove-subdomains=0 -oauth2-grant-insecure no omits Secure attribute of the token cookie and uses http scheme for callback url. Default: false"},{"location":"reference/filters/#grantcallback","title":"grantCallback","text":"

    The filter accepts authorization codes as a result of an OAuth2 authorization code grant flow triggered by oauthGrant. It uses the code to request access and refresh tokens from the OAuth2 provider\u2019s token endpoint.

    Examples:

    // The callback route is automatically added when the `-enable-oauth2-grant-flow`\n// flag is passed. You do not need to register it yourself. This is the equivalent\n// of the route that Skipper adds for you:\ncallback:\n    Path(\"/.well-known/oauth2-callback\")\n    -> grantCallback()\n    -> <shunt>;\n

    Skipper arguments:

    Argument Required? Description -oauth2-callback-path no path of the Skipper route containing the grantCallback() filter. Example: -oauth2-callback-path=/oauth/callback"},{"location":"reference/filters/#grantlogout","title":"grantLogout","text":"

    The filter revokes the refresh and access tokens in the cookie set by oauthGrant if -oauth2-revoke-token-url is configured. It also deletes the cookie by setting the Set-Cookie response header to an empty value after a successful token revocation.

    Examples:

    grantLogout()\n

    Skipper arguments:

    Argument Required? Description -oauth2-revoke-token-url no URL of the OAuth2 provider\u2019s token revocation endpoint. Example: -oauth2-revoke-token-url=https://identity.example.com/oauth2/revoke"},{"location":"reference/filters/#grantclaimsquery","title":"grantClaimsQuery","text":"

    The filter allows defining access control rules based on claims in a tokeninfo JSON payload.

    This filter is an alias for oidcClaimsQuery and functions identically to it. See oidcClaimsQuery for more information.

    Examples:

    oauthGrant() -> grantClaimsQuery(\"/path:@_:sub%\\\"userid\\\"\")\noauthGrant() -> grantClaimsQuery(\"/path:scope.#[==\\\"email\\\"]\")\n

    Skipper arguments:

    Argument Required? Description -oauth2-tokeninfo-subject-key yes the key of the attribute containing the OAuth2 subject ID in the OAuth2 provider\u2019s tokeninfo JSON payload. Default: uid. Example: -oauth2-tokeninfo-subject-key=sub"},{"location":"reference/filters/#openid-connect","title":"OpenID Connect","text":"

    To enable OpenID Connect filters use -oidc-secrets-file command line flag.

    "},{"location":"reference/filters/#oauthoidcuserinfo","title":"oauthOidcUserInfo","text":"
    oauthOidcUserInfo(\"https://oidc-provider.example.com\", \"client_id\", \"client_secret\",\n    \"http://target.example.com/subpath/callback\", \"email profile\", \"name email picture\",\n    \"parameter=value\", \"X-Auth-Authorization:claims.email\", \"0\")\n

    The filter needs the following parameters:

    • OpenID Connect Provider URL For example Google OpenID Connect is available on https://accounts.google.com
    • Client ID This value is obtained from the provider upon registration of the application.
    • Client Secret Also obtained from the provider
    • Callback URL The entire path to the callback from the provider on which the token will be received. It can be any value which is a subpath on which the filter is applied.
    • Scopes The OpenID scopes separated by spaces which need to be specified when requesting the token from the provider.
    • Claims The claims which should be present in the token returned by the provider.
    • Auth Code Options (optional) Passes key/value parameters to a provider\u2019s authorization endpoint. The value can be dynamically set by a query parameter with the same key name if the placeholder skipper-request-query is used.
    • Upstream Headers (optional) The upstream endpoint will receive these headers which values are parsed from the OIDC information. The header definition can be one or more header-query pairs, space delimited. The query syntax is GJSON.
    • SubdomainsToRemove (optional, default \u201c1\u201d) Configures number of subdomains to remove from the request hostname to derive OIDC cookie domain. By default one subdomain is removed, e.g. for the www.example.com request hostname the OIDC cookie domain will be example.com (to support SSO for all subdomains of the example.com). Configure \u201c0\u201d to use the same hostname. Note that value is a string.
    "},{"location":"reference/filters/#oauthoidcanyclaims","title":"oauthOidcAnyClaims","text":"
    oauthOidcAnyClaims(\"https://oidc-provider.example.com\", \"client_id\", \"client_secret\",\n    \"http://target.example.com/subpath/callback\", \"email profile\", \"name email picture\",\n    \"parameter=value\", \"X-Auth-Authorization:claims.email\")\n

    The filter needs the following parameters:

    • OpenID Connect Provider URL For example Google OpenID Connect is available on https://accounts.google.com
    • Client ID This value is obtained from the provider upon registration of the application.
    • Client Secret Also obtained from the provider
    • Callback URL The entire path to the callback from the provider on which the token will be received. It can be any value which is a subpath on which the filter is applied.
    • Scopes The OpenID scopes separated by spaces which need to be specified when requesting the token from the provider.
    • Claims Several claims can be specified and the request is allowed as long as at least one of them is present.
    • Auth Code Options (optional) Passes key/value parameters to a provider\u2019s authorization endpoint. The value can be dynamically set by a query parameter with the same key name if the placeholder skipper-request-query is used.
    • Upstream Headers (optional) The upstream endpoint will receive these headers which values are parsed from the OIDC information. The header definition can be one or more header-query pairs, space delimited. The query syntax is GJSON.
    • SubdomainsToRemove (optional, default \u201c1\u201d) Configures number of subdomains to remove from the request hostname to derive OIDC cookie domain. By default one subdomain is removed, e.g. for the www.example.com request hostname the OIDC cookie domain will be example.com (to support SSO for all subdomains of the example.com). Configure \u201c0\u201d to use the same hostname. Note that value is a string.
    "},{"location":"reference/filters/#oauthoidcallclaims","title":"oauthOidcAllClaims","text":"
    oauthOidcAllClaims(\"https://oidc-provider.example.com\", \"client_id\", \"client_secret\",\n    \"http://target.example.com/subpath/callback\", \"email profile\", \"name email picture\",\n    \"parameter=value\", \"X-Auth-Authorization:claims.email\")\n

    The filter needs the following parameters:

    • OpenID Connect Provider URL For example Google OpenID Connect is available on https://accounts.google.com
    • Client ID This value is obtained from the provider upon registration of the application.
    • Client Secret Also obtained from the provider
    • Callback URL The entire path to the callback from the provider on which the token will be received. It can be any value which is a subpath on which the filter is applied.
    • Scopes The OpenID scopes separated by spaces which need to be specified when requesting the token from the provider.
    • Claims Several claims can be specified and the request is allowed only when all claims are present.
    • Auth Code Options (optional) Passes key/value parameters to a provider\u2019s authorization endpoint. The value can be dynamically set by a query parameter with the same key name if the placeholder skipper-request-query is used.
    • Upstream Headers (optional) The upstream endpoint will receive these headers which values are parsed from the OIDC information. The header definition can be one or more header-query pairs, space delimited. The query syntax is GJSON.
    • SubdomainsToRemove (optional, default \u201c1\u201d) Configures number of subdomains to remove from the request hostname to derive OIDC cookie domain. By default one subdomain is removed, e.g. for the www.example.com request hostname the OIDC cookie domain will be example.com (to support SSO for all subdomains of the example.com). Configure \u201c0\u201d to use the same hostname. Note that value is a string.
    "},{"location":"reference/filters/#oidcclaimsquery","title":"oidcClaimsQuery","text":"
    oidcClaimsQuery(\"<path>:[<query>]\", ...)\n

    The filter is chained after oauthOidc* authentication as it parses the ID token that has been saved in the internal StateBag for this request. It validates access control of the requested path against the defined query. It accepts one or more arguments, that is a path prefix which is granted access to when the query definition evaluates positive. It supports exact matches of keys, key-value pairs, introspecting of arrays or exact and wildcard matching of nested structures. The query definition can be one or more queries per path, space delimited. The query syntax is GJSON with a convenience modifier of @_ which unfolds to [@this].#(\"+arg+\")

    Given following example ID token:

    {\n  \"email\": \"someone@example.org\",\n  \"groups\": [\n    \"CD-xyz\",\n    \"appX-Test-Users\",\n    \"Purchasing-Department\"\n  ],\n  \"name\": \"Some One\"\n}\n

    Access to path / would be granted to everyone in example.org; however, path /login only to those who are members of group \"appX-Tester\":

    oauthOidcAnyClaims(...) -> oidcClaimsQuery(\"/login:groups.#[==\\\"appX-Tester\\\"]\", \"/:@_:email%\\\"*@example.org\\\"\")\n

    For above ID token following query definitions would also be positive:

    oidcClaimsQuery(\"/:email\")\noidcClaimsQuery(\"/another/path:groups.#[%\\\"CD-*\\\"]\")\noidcClaimsQuery(\"/:name%\\\"*One\\\"\", \"/path:groups.#[%\\\"*-Test-Users\\\"] groups.#[==\\\"Purchasing-Department\\\"]\")\n

    As of now there is no negative/deny rule possible. The first matching path is evaluated against the defined query/queries and if positive, permitted.

    "},{"location":"reference/filters/#open-policy-agent","title":"Open Policy Agent","text":"

    To get started with Open Policy Agent, also have a look at the tutorial. This section is only a reference for the implemented filters.

    "},{"location":"reference/filters/#opaauthorizerequest","title":"opaAuthorizeRequest","text":"

    The canonical use case that is also implemented with Envoy External Authorization: Use the http request to evaluate if Skipper should deny the request (with customizable response) or let the request pass to the downstream service

    Example:

    opaAuthorizeRequest(\"my-app-id\")\n

    Example (passing context):

    opaAuthorizeRequest(\"my-app-id\", \"com.mydomain.xxx.myprop: myvalue\")\n

    Data Flows

    The data flow in case the policy allows the request looks like this

                 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510               \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n (1) Request \u2502     Skipper      \u2502 (4) Request   \u2502 Target Application \u2502\n\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524                  \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25ba\u2502                    \u2502\n             \u2502                  \u2502               \u2502                    \u2502\n (6) Response\u2502   (2)\u2502   \u25b2 (3)   \u2502 (5) Response  \u2502                    \u2502\n\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524Req ->\u2502   \u2502 allow \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524                    \u2502\n             \u2502Input \u2502   \u2502       \u2502               \u2502                    \u2502\n             \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524               \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n             \u2502Open Policy Agent \u2502\n             \u2502      \u2502   \u2502       \u2502\n             \u2502      \u2502   \u2502       \u2502\n             \u2502      \u2502   \u2502       \u2502\n             \u2502      \u25bc   \u2502       \u2502\n             \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2510 \u2502\n             \u2502 \u2502   Policy     \u2502 \u2502\n             \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 
\u2502\n             \u2502                  \u2502\n             \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n

    In Step (2) the http request is transformed into an input object following the Envoy structure that is also used by the OPA Envoy plugin. In (3) the decision of the policy is evaluated. If it is equivalent to an \u201callow\u201d, the remaining steps are executed as without the filter.

    The data flow in case the policy disallows the request looks like this

             \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510               \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n (1) Request \u2502     Skipper      \u2502               \u2502 Target Application \u2502\n\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524                  \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25ba\u2502                    \u2502\n             \u2502                  \u2502               \u2502                    \u2502\n (4) Response\u2502   (2)\u2502   \u25b2 (3)   \u2502               \u2502                    \u2502\n\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524Req ->\u2502   \u2502 allow \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524                    \u2502\n             \u2502Input \u2502   \u2502 =false\u2502               \u2502                    \u2502\n             \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524               \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n             \u2502Open Policy Agent \u2502\n             \u2502      \u2502   \u2502       \u2502\n             \u2502      \u2502   \u2502       \u2502\n             \u2502      \u2502   \u2502       \u2502\n             \u2502      \u25bc   \u2502       \u2502\n             \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2510 \u2502\n             \u2502 \u2502   Policy     \u2502 \u2502\n             \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 
\u2502\n             \u2502                  \u2502\n             \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n

    The difference is that if the decision in (3) is equivalent to false, the response is handled directly from the filter. If the decision contains response body, status or headers, those are used to build the response in (4); otherwise a 403 Forbidden with a generic body is returned.

    Manipulating Request Headers

    Headers both to the upstream and the downstream service can be manipulated the same way this works for Envoy external authorization

    This allows both to add and remove unwanted headers in allow/deny cases.

    "},{"location":"reference/filters/#opaauthorizerequestwithbody","title":"opaAuthorizeRequestWithBody","text":"

    Requests can also be authorized based on the request body the same way that is supported with the Open Policy Agent Envoy plugin, look for the input attribute parsed_body in the upstream documentation.

    This filter has the same parameters that the opaAuthorizeRequest filter has.

    A request\u2019s body is parsed up to a maximum size with a default of 1MB that can be configured via the -open-policy-agent-max-request-body-size command line argument. To avoid OOM errors due to too many concurrent authorized body requests, another flag -open-policy-agent-max-memory-body-parsing controls how much memory can be used across all requests with a default of 100MB. If in-flight requests that use body authorization exceed that limit, incoming requests that use the body will be rejected with an internal server error. The number of concurrent requests is

    \\[ n_{max-memory-body-parsing} \\over min(avg(n_{request-content-length}), n_{max-request-body-size}) \\]

    so if requests on average have 100KB and the maximum memory is set to 100MB, on average 1024 authorized requests can be processed concurrently.

    The filter also honors the skip-request-body-parse of the corresponding configuration that the OPA plugin uses.

    "},{"location":"reference/filters/#opaserveresponse","title":"opaServeResponse","text":"

    Always serves the response even if the policy allows the request and can customize the response completely. Can be used to re-implement legacy authorization services by already using data in Open Policy Agent but implementing an old REST API. This can also be useful to support Single Page Applications to return the calling users\u2019 permissions.

    Hint: As there is no real allow/deny in this case and the policy computes the http response, you typically will want to drop all decision logs

    Example:

    opaServeResponse(\"my-app-id\")\n

    Example (passing context):

    opaServeResponse(\"my-app-id\", \"com.mydomain.xxx.myprop: myvalue\")\n

    Data Flows

    For this filter, the data flow looks like this independent of an allow/deny decision

                 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n (1) Request \u2502     Skipper      \u2502\n\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524                  \u251c\n             \u2502                  \u2502\n (4) Response\u2502   (2)\u2502   \u25b2 (3)   \u2502\n\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524Req ->\u2502   \u2502 resp  \u2502\n             \u2502Input \u2502   \u2502       \u2502\n             \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n             \u2502Open Policy Agent \u2502\n             \u2502      \u2502   \u2502       \u2502\n             \u2502      \u2502   \u2502       \u2502\n             \u2502      \u2502   \u2502       \u2502\n             \u2502      \u25bc   \u2502       \u2502\n             \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2510 \u2502\n             \u2502 \u2502   Policy     \u2502 \u2502\n             \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n             \u2502                  \u2502\n             \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
    "},{"location":"reference/filters/#opaserveresponsewithreqbody","title":"opaServeResponseWithReqBody","text":"

    If you want to serve requests directly from an Open Policy Agent policy that uses the request body, this can be done by using the input.parsed_body attribute the same way that is supported with the Open Policy Agent Envoy plugin.

    This filter has the same parameters that the opaServeResponse filter has.

    A request\u2019s body is parsed up to a maximum size with a default of 1MB that can be configured via the -open-policy-agent-max-request-body-size command line argument. To avoid OOM errors due to too many concurrent authorized body requests, another flag -open-policy-agent-max-memory-body-parsing controls how much memory can be used across all requests with a default of 100MB. If in-flight requests that use body authorization exceed that limit, incoming requests that use the body will be rejected with an internal server error. The number of concurrent requests is

    \\[ n_{max-memory-body-parsing} \\over min(avg(n_{request-content-length}), n_{max-request-body-size}) \\]

    so if requests on average have 100KB and the maximum memory is set to 100MB, on average 1024 authorized requests can be processed concurrently.

    The filter also honors the skip-request-body-parse of the corresponding configuration that the OPA plugin uses.

    "},{"location":"reference/filters/#cookie-handling","title":"Cookie Handling","text":""},{"location":"reference/filters/#droprequestcookie","title":"dropRequestCookie","text":"

    Deletes given cookie from the request header.

    Parameters:

    • cookie name (string)

    Example:

    dropRequestCookie(\"test-session\")\n
    "},{"location":"reference/filters/#dropresponsecookie","title":"dropResponseCookie","text":"

    Deletes given cookie from the response header.

    Parameters:

    • cookie name (string)

    Example:

    dropResponseCookie(\"test-session\")\n
    "},{"location":"reference/filters/#requestcookie","title":"requestCookie","text":"

    Append a cookie to the request header.

    Parameters:

    • cookie name (string)
    • cookie value (string)

    Example:

    requestCookie(\"test-session\", \"abc\")\n
    "},{"location":"reference/filters/#responsecookie","title":"responseCookie","text":"

    Appends a cookie to the response via \u201cSet-Cookie\u201d header. It derives cookie domain by removing one subdomain from the request hostname domain. The filter accepts an optional argument to set the Max-Age attribute of the cookie, of type int, in seconds. Use zero to expire the cookie immediately. An optional fourth argument, \u201cchange-only\u201d, controls if the cookie should be set on every response, or only if the request does not contain a cookie with the provided name and value.

    Example:

    responseCookie(\"test-session\", \"abc\")\nresponseCookie(\"test-session\", \"abc\", 31536000),\nresponseCookie(\"test-session\", \"abc\", 31536000, \"change-only\")\nresponseCookie(\"test-session\", \"deleted\", 0),\n
    "},{"location":"reference/filters/#jscookie","title":"jsCookie","text":"

    The JS cookie behaves exactly as the response cookie, but it does not set the HttpOnly directive, so these cookies will be accessible from JS code running in web browsers.

    Example:

    jsCookie(\"test-session-info\", \"abc-debug\", 31536000, \"change-only\")\n
    "},{"location":"reference/filters/#circuit-breakers","title":"Circuit Breakers","text":""},{"location":"reference/filters/#consecutivebreaker","title":"consecutiveBreaker","text":"

    This breaker opens when the proxy could not connect to a backend or received a >=500 status code at least N times in a row. When open, the proxy returns 503 - Service Unavailable response during the breaker timeout. After this timeout, the breaker goes into half-open state, in which it expects that M number of requests succeed. The requests in the half-open state are accepted concurrently. If any of the requests during the half-open state fails, the breaker goes back to open state. If all succeed, it goes to closed state again.

    Parameters:

    • number of consecutive failures to open (int)
    • timeout (time string, parseable by time.Duration) - optional
    • half-open requests (int) - optional
    • idle-ttl (time string, parseable by time.Duration) - optional

    See also the circuit breaker docs.

    Can be used as egress feature.

    "},{"location":"reference/filters/#ratebreaker","title":"rateBreaker","text":"

    The \u201crate breaker\u201d works similar to the consecutiveBreaker, but instead of considering N consecutive failures for going open, it maintains a sliding window of the last M events, both successes and failures, and opens only when the number of failures reaches N within the window. This way the sliding window is not time based and allows the same breaker characteristics for low and high rate traffic.

    Parameters:

    • number of failures within the sliding window required to open (int)
    • sliding window (int)
    • timeout (time string, parseable by time.Duration) - optional
    • half-open requests (int) - optional
    • idle-ttl (time string, parseable by time.Duration) - optional

    See also the circuit breaker docs.

    Can be used as egress feature.

    "},{"location":"reference/filters/#disablebreaker","title":"disableBreaker","text":"

    Change (or set) the breaker configurations for an individual route and disable for another, in eskip:

    updates: Method(\"POST\") && Host(\"foo.example.org\")\n  -> consecutiveBreaker(9)\n  -> \"https://foo.backend.net\";\n\nbackendHealthcheck: Path(\"/healthcheck\")\n  -> disableBreaker()\n  -> \"https://foo.backend.net\";\n

    See also the circuit breaker docs.

    Can be used as egress feature.

    "},{"location":"reference/filters/#rate-limit","title":"Rate Limit","text":""},{"location":"reference/filters/#localratelimit","title":"localRatelimit","text":"

    DEPRECATED use clientRatelimit with the same settings instead.

    "},{"location":"reference/filters/#clientratelimit","title":"clientRatelimit","text":"

    Per skipper instance calculated ratelimit, that allows number of requests by client. The definition of the same client is based on data of the http header and can be changed with an optional third parameter. If the third parameter is set skipper will use the defined HTTP header to put the request in the same client bucket, else the X-Forwarded-For Header will be used. You need to run skipper with command line flag -enable-ratelimits.

    One filter consumes memory calculated by the following formula, where N is the number of individual clients put into the same bucket, M the maximum number of requests allowed:

    memory = N * M * 15 byte\n

    Memory usage examples:

    • 5MB for M=3 and N=100000
    • 15MB for M=10 and N=100000
    • 150MB for M=100 and N=100000

    Parameters:

    • number of allowed requests per time period (int)
    • time period for requests being counted (time.Duration)
    • optional parameter to set the same client by header; in case the provided string contains a comma (,), it will combine all these headers (string)
    clientRatelimit(3, \"1m\")\nclientRatelimit(3, \"1m\", \"Authorization\")\nclientRatelimit(3, \"1m\", \"X-Foo,Authorization,X-Bar\")\n

    See also the ratelimit docs.

    "},{"location":"reference/filters/#ratelimit","title":"ratelimit","text":"

    Per skipper instance calculated ratelimit, that allows forwarding a number of requests to the backend group. You need to run skipper with command line flag -enable-ratelimits.

    Parameters:

    • number of allowed requests per time period (int)
    • time period for requests being counted (time.Duration)
    • response status code to use for a rate limited request - optional, default: 429
    ratelimit(20, \"1m\")\nratelimit(300, \"1h\")\nratelimit(4000, \"1m\", 503)\n

    See also the ratelimit docs.

    "},{"location":"reference/filters/#clusterclientratelimit","title":"clusterClientRatelimit","text":"

    This ratelimit is calculated across all skipper peers and the same rate limit group. The first parameter is a string to select the same ratelimit group across one or more routes. The rate limit group allows the given number of requests by client. The client identity is derived from the value of the X-Forwarded-For header or client IP address and can be changed with an optional fourth parameter. The optional fourth parameter may specify comma-separated list of header names. Skipper will join header values to obtain client identity. If identity value is empty (i.e. when all header values are empty or missing) then ratelimit does not apply.

    You need to run skipper with command line flags -enable-swarm and -enable-ratelimits. See also our cluster ratelimit tutorial

    Parameters:

    • rate limit group (string)
    • number of allowed requests per time period (int)
    • time period for requests being counted (time.Duration)
    • optional parameter to set the same client by header, in case the provided string contains a comma (,), it will combine all these headers (string)
    clusterClientRatelimit(\"groupA\", 10, \"1h\")\nclusterClientRatelimit(\"groupA\", 10, \"1h\", \"Authorization\")\nclusterClientRatelimit(\"groupA\", 10, \"1h\", \"X-Forwarded-For,Authorization,User-Agent\")\n

    See also the ratelimit docs.

    "},{"location":"reference/filters/#clusterratelimit","title":"clusterRatelimit","text":"

    This ratelimit is calculated across all skipper peers and the same rate limit group. The first parameter is a string to select the same ratelimit group across one or more routes. The rate limit group allows the given number of requests to a backend.

    You need to run skipper with command line flags -enable-swarm and -enable-ratelimits. See also our cluster ratelimit tutorial

    Parameters:

    • rate limit group (string)
    • number of allowed requests per time period (int)
    • time period for requests being counted (time.Duration)
    • response status code to use for a rate limited request - optional, default: 429
    clusterRatelimit(\"groupB\", 20, \"1m\")\nclusterRatelimit(\"groupB\", 300, \"1h\")\nclusterRatelimit(\"groupB\", 4000, \"1m\", 503)\n

    See also the ratelimit docs.

    "},{"location":"reference/filters/#backendratelimit","title":"backendRatelimit","text":"

    The filter configures request rate limit for each backend endpoint within rate limit group across all Skipper peers. When limit is reached Skipper refuses to forward the request to the backend and responds with 503 Service Unavailable status to the client, i.e. implements load shedding.

    It is similar to clusterClientRatelimit filter but counts request rate using backend endpoint address instead of incoming request IP address or a HTTP header. Requires command line flags -enable-swarm and -enable-ratelimits.

    Both rate limiting and load shedding can use the exact same mechanism to protect the backend but the key difference is the semantics:

    • rate limiting should adopt 4XX and inform the client that they are exceeding some quota. It doesn\u2019t depend on the current capacity of the backend.
    • load shedding should adopt 5XX and inform the client that the backend is not able to provide the service. It depends on the current capacity of the backend.

    Parameters:

    • rate limit group (string)
    • number of allowed requests per time period (int)
    • timeframe for requests being counted (time.Duration)
    • response status code to use for rejected requests - optional, default: 503

    Multiple filter definitions using the same group must use the same number of allowed requests and timeframe values.

    Examples:

    foo: Path(\"/foo\")\n  -> backendRatelimit(\"foobar\", 100, \"1s\")\n  -> <\"http://backend1\", \"http://backend2\">;\n\nbar: Path(\"/bar\")\n  -> backendRatelimit(\"foobar\", 100, \"1s\")\n  -> <\"http://backend1\", \"http://backend2\">;\n
    Configures rate limit of 100 requests per second for each backend1 and backend2 regardless of the request path by using the same group name, number of requests and timeframe parameters.

    foo: Path(\"/foo\")\n  -> backendRatelimit(\"foo\", 40, \"1s\")\n  -> <\"http://backend1\", \"http://backend2\">;\n\nbar: Path(\"/bar\")\n  -> backendRatelimit(\"bar\", 80, \"1s\")\n  -> <\"http://backend1\", \"http://backend2\">;\n
    Configures rate limit of 40 requests per second for each backend1 and backend2 for the /foo requests and 80 requests per second for the /bar requests by using different group name per path. The total request rate each backend receives can not exceed 40+80=120 requests per second.

    foo: Path(\"/baz\")\n  -> backendRatelimit(\"baz\", 100, \"1s\", 429)\n  -> <\"http://backend1\", \"http://backend2\">;\n
    Configures rate limit of 100 requests per second for each backend1 and backend2 and responds with 429 Too Many Requests when limit is reached.

    "},{"location":"reference/filters/#clusterleakybucketratelimit","title":"clusterLeakyBucketRatelimit","text":"

    Implements leaky bucket rate limit algorithm that uses Redis as a storage. Requires command line flags -enable-ratelimits, -enable-swarm and -swarm-redis-urls to be set.

    The leaky bucket is an algorithm based on an analogy of how a bucket with a constant leak will overflow if either the average rate at which water is poured in exceeds the rate at which the bucket leaks or if more water than the capacity of the bucket is poured in all at once, see https://en.wikipedia.org/wiki/Leaky_bucket

    Parameters:

    • label (string)
    • leak rate volume (int)
    • leak rate period (time.Duration)
    • capacity (int)
    • increment (int)

    The bucket label, leak rate (volume/period) and capacity uniquely identify the bucket.

    Label supports template placeholders. If a template placeholder can\u2019t be resolved then request is allowed and does not add to any bucket.

    Leak rate (divided by increment) defines a maximum average allowed request rate. The rate is configured by two parameters for convenience and consistency with other filters but is actually a single number, e.g. the rate of 2 per second equals to the rate of 20 per 10 seconds or 120 per minute.

    Capacity defines the maximum request burst size or an allowed jitter.

    Each passing request adds increment amount to the bucket, different routes may add different amounts to the same bucket.

    Configuration with equal capacity and increment allows no jitter: first request fills up the bucket full and subsequent request will be rejected if it arrives earlier than emission interval = 1/leak rate.

    Real requests always have a jitter which can be demonstrated by the configuration having capacity and increment of one:

    r1: * -> clusterLeakyBucketRatelimit(\"1rps\", 1, \"1s\", 1, 1) -> status(200) -> <shunt>;\n
    it does not allow jitter and therefore rejects ~half of the requests coming at rate of 1 rps:
    $ echo \"GET http://localhost:9090\" | vegeta attack -rate=1/s -duration=1m | vegeta report\nRequests      [total, rate, throughput]  60, 1.02, 0.58\nDuration      [total, attack, wait]      59.001991855s, 59.000310522s, 1.681333ms\nLatencies     [mean, 50, 95, 99, max]    1.721207ms, 1.555227ms, 1.943115ms, 10.689486ms, 11.538278ms\nBytes In      [total, mean]              0, 0.00\nBytes Out     [total, mean]              0, 0.00\nSuccess       [ratio]                    56.67%\nStatus Codes  [code:count]               200:34  429:26\nError Set:\n429 Too Many Requests\n

    On the other hand the configuration with capacity greater than increment:

    r1: * -> clusterLeakyBucketRatelimit(\"1rps2\", 1, \"1s\", 2, 1) -> status(200) -> <shunt>;\n
    allows all requests:
    ~$ echo \"GET http://localhost:9090\" | vegeta attack -rate=1/s -duration=1m | vegeta report\nRequests      [total, rate, throughput]  60, 1.02, 1.02\nDuration      [total, attack, wait]      59.00023518s, 58.999779118s, 456.062\u00b5s\nLatencies     [mean, 50, 95, 99, max]    1.410641ms, 1.585908ms, 1.859727ms, 8.285963ms, 8.997149ms\nBytes In      [total, mean]              0, 0.00\nBytes Out     [total, mean]              0, 0.00\nSuccess       [ratio]                    100.00%\nStatus Codes  [code:count]               200:60\nError Set:\n
    and even if rate is greater than 1 rps the average allowed request rate is still equal to the leak rate of 1 rps:
    $ echo \"GET http://localhost:9090\" | vegeta attack -rate=11/10s -duration=1m | vegeta report\nRequests      [total, rate, throughput]  66, 1.12, 1.03\nDuration      [total, attack, wait]      59.091880389s, 59.089985762s, 1.894627ms\nLatencies     [mean, 50, 95, 99, max]    1.709568ms, 1.60613ms, 1.925731ms, 10.601822ms, 12.10052ms\nBytes In      [total, mean]              0, 0.00\nBytes Out     [total, mean]              0, 0.00\nSuccess       [ratio]                    92.42%\nStatus Codes  [code:count]               200:61  429:5\nError Set:\n429 Too Many Requests\n

    Therefore the capacity should be configured greater than increment unless strict request interval needs to be enforced. Configuration having capacity below increment rejects all requests.

    Examples:

    // allow each unique Authorization header once in five seconds\nclusterLeakyBucketRatelimit(\"auth-${request.header.Authorization}\", 1, \"5s\", 2, 1)\n\n// allow 60 requests per hour (each subsequent request allowed not earlier than after 1h/60 = 1m) for all clients\nclusterLeakyBucketRatelimit(\"hourly\", 60, \"1h\", 1, 1)\n\n// allow 10 requests per minute for each unique PHPSESSID cookie with bursts of up to 5 requests\nclusterLeakyBucketRatelimit(\"session-${request.cookie.PHPSESSID}\", 10, \"1m\", 5, 1)\n\n// use the same bucket but add different amount (i.e. one /expensive request counts as two /cheap)\nPath(\"/cheap\")     -> clusterLeakyBucketRatelimit(\"user-${request.cookie.Authorization}\", 1, \"1s\", 5, 1) -> ...\nPath(\"/expensive\") -> clusterLeakyBucketRatelimit(\"user-${request.cookie.Authorization}\", 1, \"1s\", 5, 2) -> ...\n

    "},{"location":"reference/filters/#ratelimitfailclosed","title":"ratelimitFailClosed","text":"

    This filter changes the failure mode for all rate limit filters of the route. By default rate limit filters fail open on infrastructure errors (e.g. when redis is down) and allow requests. When this filter is present on the route, rate limit filters will fail closed in case of infrastructure errors and deny requests.

    Examples:

    fail_open: * -> clusterRatelimit(\"g\",10, \"1s\")\nfail_closed: * -> ratelimitFailClosed() -> clusterRatelimit(\"g\", 10, \"1s\")\n

    In case clusterRatelimit could not reach the swarm (e.g. redis):

    • Route fail_open will allow the request
    • Route fail_closed will deny the request
    "},{"location":"reference/filters/#load-shedding","title":"Load Shedding","text":"

    The basic idea of load shedding is to reduce errors by early stopping some of the ingress requests that create too much load and serving the maximum throughput the system can process at a point in time.

    There is a great talk by Acacio Cruz from Google that explains the basic principles.

    "},{"location":"reference/filters/#admissioncontrol","title":"admissionControl","text":"

    Implements an admission control filter, that rejects traffic by observed error rate and probability. If it rejects a request skipper will respond with status code 503.

    The probability of rejection is calculated by the following equation:

    \\[ P_{reject} = ( { n_{total} - { n_{success} \\over threshold } \\over n_{total} + 1} )^{ exponent } \\]

    Examples:

    admissionControl(metricSuffix, mode, d, windowSize, minRPS, successThreshold, maxRejectProbability, exponent)\nadmissionControl(\"myapp\", \"active\", \"1s\", 5, 10, 0.95, 0.9, 0.5)\n

    Parameters:

    • metric suffix (string)
    • mode (enum)
    • d (time.Duration)
    • window size (int)
    • minRps (int)
    • success threshold (float64)
    • max reject probability (float64)
    • exponent (float64)

    Metric suffix is the chosen suffix key to expose reject counter, should be unique by filter instance

    Mode has 3 different possible values:

    • \u201cactive\u201d will reject traffic
    • \u201cinactive\u201d will never reject traffic
    • \u201clogInactive\u201d will not reject traffic, but log to debug filter settings

    D is the time duration of a single slot for required counters in our circular buffer of window size.

    Window size is the size of the circular buffer. It is used to snapshot counters to calculate total requests and number of success. It is within \\([1, 100]\\).

    MinRps is the minimum requests per second that have to pass this filter otherwise it will not reject traffic.

    Success threshold sets the lowest request success rate at which the filter will not reject requests. It is within \\((0,1]\\). A value of 0.95 means an error rate of lower than 5% will not trigger rejects.

    Max reject probability sets the upper bound of reject probability. It is within \\((0,1]\\). A value of 0.95 means if backend errors with 100% it will only reject up to 95%.

    exponent is used to dictate the rejection probability. The calculation is done by \\(p = p^{exponent}\\) The exponent value is within \\((0,\\infty]\\), to increase rejection probability you have to use values lower than 1:

    • 1: linear
    • 1/2: quadratic
    • 1/3: cubic
    "},{"location":"reference/filters/#lua","title":"lua","text":"

    See the scripts page

    "},{"location":"reference/filters/#logs","title":"Logs","text":""},{"location":"reference/filters/#accesslogdisabled","title":"accessLogDisabled","text":"

    Deprecated: use disableAccessLog or enableAccessLog

    The accessLogDisabled filter overrides global Skipper AccessLogDisabled setting for a specific route, which allows to either turn-off the access log for specific route while access log, in general, is enabled or vice versa.

    Example:

    accessLogDisabled(\"false\")\n
    "},{"location":"reference/filters/#disableaccesslog","title":"disableAccessLog","text":"

    Filter overrides global Skipper AccessLogDisabled setting and allows to turn-off the access log for specific route while access log, in general, is enabled. It is also possible to disable access logs only for a subset of response codes from backend by providing an optional list of response code prefixes.

    Parameters:

    • response code prefixes (variadic int) - optional

    Example:

    disableAccessLog()\ndisableAccessLog(1, 301, 40)\n

    This disables logs of all requests with status codes 1xxs, 301 and all 40xs.

    "},{"location":"reference/filters/#enableaccesslog","title":"enableAccessLog","text":"

    Filter overrides global Skipper AccessLogDisabled setting and allows to turn-on the access log for specific route while access log, in general, is disabled. It is also possible to enable access logs only for a subset of response codes from backend by providing an optional list of response code prefixes.

    Parameters:

    • response code prefixes (variadic int) - optional

    Example:

    enableAccessLog()\nenableAccessLog(1, 301, 20)\n

    This enables logs of all requests with status codes 1xxs, 301 and all 20xs.

    "},{"location":"reference/filters/#auditlog","title":"auditLog","text":"

    Filter auditLog() logs the request and N bytes of the body into the log file. N defaults to 1024 and can be overridden with -max-audit-body=<int>. N=0 omits logging the body.

    Example:

    auditLog()\n
    "},{"location":"reference/filters/#unverifiedauditlog","title":"unverifiedAuditLog","text":"

    Filter unverifiedAuditLog() adds a Header, X-Unverified-Audit, to the request, the content of which, will also be written to the log file. By default, the value of the audit header will be equal to the value of the sub key, from the Authorization token. This can be changed by providing a string input to the filter which matches another key from the token.

    N.B. It is important to note that, if the content of the X-Unverified-Audit header does not match the following regex, then a default value of invalid-sub will be populated in the header instead: ^[a-zA-Z0-9_/:?=&%@.#-]*$

    Examples:

    unverifiedAuditLog()\n
    unverifiedAuditLog(\"azp\")\n
    "},{"location":"reference/filters/#backend","title":"Backend","text":""},{"location":"reference/filters/#backendisproxy","title":"backendIsProxy","text":"

    Notifies the proxy that the backend handling this request is also a proxy. The proxy type is based on the URL scheme which can be either http, https or socks5.

    Keep in mind that Skipper currently cannot handle CONNECT requests by tunneling the traffic to the target destination, however, the CONNECT requests can be forwarded to a different proxy using this filter.

    Example:

    foo1:\n  *\n  -> backendIsProxy()\n  -> \"http://proxy.example.com\";\n\nfoo2:\n  *\n  -> backendIsProxy()\n  -> <roundRobin, \"http://proxy1.example.com\", \"http://proxy2.example.com\">;\n\nfoo3:\n  *\n  -> setDynamicBackendUrl(\"http://proxy.example.com\")\n  -> backendIsProxy()\n  -> <dynamic>;\n
    "},{"location":"reference/filters/#setdynamicbackendhostfromheader","title":"setDynamicBackendHostFromHeader","text":"

    Filter sets the backend host for a route, value is taken from the provided header. Can be used only with <dynamic> backend. Meant to be used together with setDynamicBackendSchemeFromHeader or setDynamicBackendScheme. If this filter chained together with setDynamicBackendUrlFromHeader or setDynamicBackendUrl filters, the latter ones would have priority.

    Parameters:

    • header name (string)

    Example:

    foo: * -> setDynamicBackendHostFromHeader(\"X-Forwarded-Host\") -> <dynamic>;\n
    "},{"location":"reference/filters/#setdynamicbackendschemefromheader","title":"setDynamicBackendSchemeFromHeader","text":"

    Filter sets the backend scheme for a route, value is taken from the provided header. Can be used only with <dynamic> backend. Meant to be used together with setDynamicBackendHostFromHeader or setDynamicBackendHost. If this filter chained together with setDynamicBackendUrlFromHeader or setDynamicBackendUrl, the latter ones would have priority.

    Parameters:

    • header name (string)

    Example:

    foo: * -> setDynamicBackendSchemeFromHeader(\"X-Forwarded-Proto\") -> <dynamic>;\n
    "},{"location":"reference/filters/#setdynamicbackendurlfromheader","title":"setDynamicBackendUrlFromHeader","text":"

    Filter sets the backend url for a route, value is taken from the provided header. Can be used only with <dynamic> backend.

    Parameters:

    • header name (string)

    Example:

    foo: * -> setDynamicBackendUrlFromHeader(\"X-Custom-Url\") -> <dynamic>;\n
    "},{"location":"reference/filters/#setdynamicbackendhost","title":"setDynamicBackendHost","text":"

    Filter sets the backend host for a route. Can be used only with <dynamic> backend. Meant to be used together with setDynamicBackendSchemeFromHeader or setDynamicBackendScheme. If this filter chained together with setDynamicBackendUrlFromHeader or setDynamicBackendUrl, the latter ones would have priority.

    Parameters:

    • host (string)

    Example:

    foo: * -> setDynamicBackendHost(\"example.com\") -> <dynamic>;\n
    "},{"location":"reference/filters/#setdynamicbackendscheme","title":"setDynamicBackendScheme","text":"

    Filter sets the backend scheme for a route. Can be used only with <dynamic> backend. Meant to be used together with setDynamicBackendHostFromHeader or setDynamicBackendHost. If this filter chained together with setDynamicBackendUrlFromHeader or setDynamicBackendUrl, the latter ones would have priority.

    Parameters:

    • scheme (string)

    Example:

    foo: * -> setDynamicBackendScheme(\"https\") -> <dynamic>;\n
    "},{"location":"reference/filters/#setdynamicbackendurl","title":"setDynamicBackendUrl","text":"

    Filter sets the backend url for a route. Can be used only with <dynamic> backend.

    Parameters:

    • url (string)

    Example:

    foo: * -> setDynamicBackendUrl(\"https://example.com\") -> <dynamic>;\n
    "},{"location":"reference/filters/#apiusagemonitoring","title":"apiUsageMonitoring","text":"

    The apiUsageMonitoring filter adds API related metrics to the Skipper monitoring. It is by default not activated. Activate it by providing the -enable-api-usage-monitoring flag at Skipper startup. In its deactivated state, it is still registered as a valid filter (allowing route configurations to specify it), but will perform no operation. That allows, per instance, production environments to use it and testing environments not to while keeping the same route configuration for all environments.

    For the client based metrics, additional flags need to be specified.

    Flag Description api-usage-monitoring-realm-keys Name of the property in the JWT JSON body that contains the name of the realm. api-usage-monitoring-client-keys Name of the property in the JWT JSON body that contains the name of the client. api-usage-monitoring-realms-tracking-pattern RegEx of realms to be monitored. Defaults to \u2018services\u2019.

    NOTE: Make sure to activate the metrics flavour proper to your environment using the metrics-flavour flag in order to get those metrics.

    Example:

    skipper -metrics-flavour prometheus -enable-api-usage-monitoring -api-usage-monitoring-realm-keys=\"realm\" -api-usage-monitoring-client-keys=\"managed-id\" api-usage-monitoring-realms-tracking-pattern=\"services,users\"\n

    The structure of the metrics is all of those elements, separated by . dots:

    Part Description apiUsageMonitoring.custom Every filter metrics starts with the name of the filter followed by custom. This part is constant. Application ID Identifier of the application, configured in the filter under app_id. Tag Tag of the application (e.g. staging), configured in the filter under tag. API ID Identifier of the API, configured in the filter under api_id. Method The request\u2019s method (verb), capitalized (ex: GET, POST, PUT, DELETE). Path The request\u2019s path, in the form of the path template configured in the filter under path_templates. Realm The realm in which the client is authenticated. Client Identifier under which the client is authenticated. Metric Name Name (or key) of the metric being tracked."},{"location":"reference/filters/#available-metrics","title":"Available Metrics","text":""},{"location":"reference/filters/#endpoint-related-metrics","title":"Endpoint Related Metrics","text":"

    Those metrics are not identifying the realm and client. They always have * in their place.

    Example:

                                                                                          + Realm\n                                                                                      |\napiUsageMonitoring.custom.orders-backend.staging.orders-api.GET.foo/orders/{order-id}.*.*.http_count\n                                                                                        | |\n                                                                                        | + Metric Name\n                                                                                        + Client\n

    The available metrics are:

    Type Metric Name Description Counter http_count number of HTTP exchanges Counter http1xx_count number of HTTP exchanges resulting in information (HTTP status in the 100s) Counter http2xx_count number of HTTP exchanges resulting in success (HTTP status in the 200s) Counter http3xx_count number of HTTP exchanges resulting in a redirect (HTTP status in the 300s) Counter http4xx_count number of HTTP exchanges resulting in a client error (HTTP status in the 400s) Counter http5xx_count number of HTTP exchanges resulting in a server error (HTTP status in the 500s) Histogram latency time between the first observable moment (a call to the filter\u2019s Request) until the last (a call to the filter\u2019s Response)"},{"location":"reference/filters/#client-related-metrics","title":"Client Related Metrics","text":"

    Those metrics are not identifying endpoint (path) and HTTP verb. They always have * in their place.

    Example:

                                                                + HTTP Verb\n                                                            | + Path Template     + Metric Name\n                                                            | |                   |\napiUsageMonitoring.custom.orders-backend.staging.orders-api.*.*.users.mmustermann.http_count\n                                                                |     |\n                                                                |     + Client\n                                                                + Realm\n

    The available metrics are:

    Type Metric Name Description Counter http_count number of HTTP exchanges Counter http1xx_count number of HTTP exchanges resulting in information (HTTP status in the 100s) Counter http2xx_count number of HTTP exchanges resulting in success (HTTP status in the 200s) Counter http3xx_count number of HTTP exchanges resulting in a redirect (HTTP status in the 300s) Counter http4xx_count number of HTTP exchanges resulting in a client error (HTTP status in the 400s) Counter http5xx_count number of HTTP exchanges resulting in a server error (HTTP status in the 500s) Counter latency_sum sum of seconds (in decimal form) between the first observable moment (a call to the filter\u2019s Request) until the last (a call to the filter\u2019s Response)"},{"location":"reference/filters/#filter-configuration","title":"Filter Configuration","text":"

    Endpoints can be monitored using the apiUsageMonitoring filter in the route. It accepts JSON objects (as strings) of the format mentioned below. In case any of the required parameters is missing, no-op filter is created, i.e. no metrics are captured, but the creation of the route does not fail.

    api-usage-monitoring-configuration:\n  type: object\n  required:\n    - application_id\n    - api_id\n    - path_templates\n  properties:\n    application_id:\n      type: string\n      description: ID of the application\n      example: order-service\n    tag:\n      type: string\n      description: tag of the application\n      example: staging\n    api_id:\n      type: string\n      description: ID of the API\n      example: orders-api\n    path_templates:\n      description: Endpoints to be monitored.\n      type: array\n      minLength: 1\n      items:\n        type: string\n        description: >\n          Path template in /articles/{article-id} (OpenAPI 3) or in /articles/:article-id format.\n          NOTE: They will be normalized to the :this format for metrics naming.\n        example: /orders/{order-id}\n    client_tracking_pattern:\n        description: >\n            The pattern that matches client id in form of a regular expression.\n\n            By default (if undefined), it is set to `.*`.\n\n            An empty string disables the client metrics completely.\n        type: string\n        examples:\n            all_services:\n                summary: All services are tracked (for all activated realms).\n                value: \".*\"\n            just_some_services:\n                summary: Only services `orders-service` and `shipment-service` are tracked.\n                value: \"(orders\\-service|shipment\\-service)\"\n

    Configuration Example:

    apiUsageMonitoring(`\n    {\n        \"application_id\": \"my-app\",\n        \"tag\": \"staging\",\n        \"api_id\": \"orders-api\",\n        \"path_templates\": [\n            \"foo/orders\",\n            \"foo/orders/:order-id\",\n            \"foo/orders/:order-id/order_item/{order-item-id}\"\n        ],\n        \"client_tracking_pattern\": \"(shipping\\-service|payment\\-service)\"\n    }`,`{\n        \"application_id\": \"my-app\",\n        \"api_id\": \"customers-api\",\n        \"path_templates\": [\n            \"/foo/customers/\",\n            \"/foo/customers/{customer-id}/\"\n        ]\n    }\n`)\n

    Based on the previous configuration, here is an example of a counter metric.

    apiUsageMonitoring.custom.my-app.staging.orders-api.GET.foo/orders/{order-id}.*.*.http_count\n

    Note that a missing tag in the configuration will be replaced by {no-tag} in the metric:

    apiUsageMonitoring.custom.my-app.{no-tag}.customers-api.GET.foo/customers.*.*.http_count\n

    Here is the Prometheus query to obtain it.

    sum(rate(skipper_custom_total{key=\"apiUsageMonitoring.custom.my-app.staging.orders-api.GET.foo/orders/{order-id}.*.*.http_count\"}[60s])) by (key)\n

    Here is an example of a histogram metric.

    apiUsageMonitoring.custom.my_app.staging.orders-api.POST.foo/orders.latency\n

    Here is the Prometheus query to obtain it.

    histogram_quantile(0.5, sum(rate(skipper_custom_duration_seconds_bucket{key=\"apiUsageMonitoring.custom.my-app.staging.orders-api.POST.foo/orders.*.*.latency\"}[60s])) by (le, key))\n

    NOTE: Non configured paths will be tracked with {unknown} Application ID, Tag, API ID and path template.

    However, if all application_ids of your configuration refer to the same application, the filter assume that also non configured paths will be directed to this application. E.g.:

    apiUsageMonitoring.custom.my-app.{unknown}.{unknown}.GET.{no-match}.*.*.http_count\n
    "},{"location":"reference/filters/#originmarker","title":"originMarker","text":"

    This filter is used to measure the time it took to create a route. Other than that, it\u2019s a no-op. You can include the same origin marker when you re-create the route. As long as the origin and id are the same, the route creation time will not be measured again. If there are multiple origin markers with the same origin, the earliest timestamp will be used.

    Parameters:

    • the name of the origin
    • the ID of the object that is the logical source for the route
    • the creation timestamp (rfc3339)

    Example:

    originMarker(\"apiUsageMonitoring\", \"deployment1\", \"2019-08-30T09:55:51Z\")\n
    "},{"location":"reference/filters/#scheduler","title":"Scheduler","text":""},{"location":"reference/filters/#fifo","title":"fifo","text":"

    This Filter is similar to the lifo filter in regards to parameters and status codes.

    It turned out that lifo() filter can hurt performance at high load. On AWS instance c6g.8xlarge lifo filter had a limit of 21000 requests per second on a single instance. The fifo() filter had not hit a limit at 30000 requests per second. If you use TCP-LIFO, then request processing is already in LIFO style.

    Parameters:

    • MaxConcurrency specifies how many goroutines are allowed to work on this queue (int)
    • MaxQueueSize sets the queue size (int)
    • Timeout sets the timeout to get request scheduled (time)

    Example:

    fifo(100, 150, \"10s\")\n
    "},{"location":"reference/filters/#fifowithbody","title":"fifoWithBody","text":"

    This Filter is similar to the lifo filter in regards to parameters and status codes. Performance considerations are similar to fifo.

    The difference between fifo and fifoWithBody is that fifo will decrement the concurrency as soon as the backend sent response headers and fifoWithBody will decrement the concurrency if the response body was served. Normally both are very similar, but if you have a fully async component that serves multiple website fragments, this would decrement concurrency too early.

    Parameters:

    • MaxConcurrency specifies how many goroutines are allowed to work on this queue (int)
    • MaxQueueSize sets the queue size (int)
    • Timeout sets the timeout to get request scheduled (time)

    Example:

    fifoWithBody(100, 150, \"10s\")\n
    "},{"location":"reference/filters/#lifo","title":"lifo","text":"

    This Filter changes skipper to handle the route with a bounded last in first out queue (LIFO), instead of an unbounded first in first out queue (FIFO). The default skipper scheduler is based on Go net/http package, which provides an unbounded FIFO request handling. If you enable this filter the request scheduling will change to a LIFO. The idea of a LIFO queue is based on Dropbox bandaid proxy, which is not opensource. Dropbox shared their idea in a public blogpost. All bounded scheduler filters will respond requests with server status error codes in case of overrun.

    All scheduler filters return HTTP status code:

    • 502, if the specified timeout is reached, because a request could not be scheduled fast enough
    • 503, if the queue is full

    Parameters:

    • MaxConcurrency specifies how many goroutines are allowed to work on this queue (int)
    • MaxQueueSize sets the queue size (int)
    • Timeout sets the timeout to get request scheduled (time)

    Example:

    lifo(100, 150, \"10s\")\n

    The above configuration will set MaxConcurrency to 100, MaxQueueSize to 150 and Timeout to 10 seconds.

    When there are multiple lifo filters on the route, only the last one will be applied.

    "},{"location":"reference/filters/#lifogroup","title":"lifoGroup","text":"

    This filter is similar to the lifo filter.

    Parameters:

    • GroupName to group one or many routes to the same queue, which have to have the same settings (string)
    • MaxConcurrency specifies how many goroutines are allowed to work on this queue (int)
    • MaxQueueSize sets the queue size (int)
    • Timeout sets the timeout to get request scheduled (time)

    Example:

    lifoGroup(\"mygroup\", 100, 150, \"10s\")\n

    The above configuration will set MaxConcurrency to 100, MaxQueueSize to 150 and Timeout to 10 seconds for the lifoGroup \u201cmygroup\u201d, that can be shared between multiple routes.

    It is enough to set the concurrency, queue size and timeout parameters for one instance of the filter in the group, and only the group name for the rest. Setting these values for multiple instances is fine, too. While only one of them will be used as the source for the applied settings, if there is accidentally a difference between the settings in the same group, a warning will be logged.

    It is possible to use the lifoGroup filter together with the single lifo filter, e.g. if a route belongs to a group, but needs to have additional stricter settings than the whole group.

    "},{"location":"reference/filters/#rfc-compliance","title":"RFC Compliance","text":""},{"location":"reference/filters/#rfchost","title":"rfcHost","text":"

    This filter removes the optional trailing dot in the outgoing host header.

    Example:

    rfcHost()\n
    "},{"location":"reference/filters/#rfcpath","title":"rfcPath","text":"

    This filter forces an alternative interpretation of the RFC 2616 and RFC 3986 standards, where paths containing reserved characters will have these characters unescaped when the incoming request also has them unescaped.

    Example:

    Path(\"/api/*id) -> rfcPath() -> \"http://api-backend\"\n

    In the above case, if the incoming request has something like foo%2Fbar in the id position, the api-backend service will also receive it in the format foo%2Fbar, while without the rfcPath() filter the outgoing request path will become /api/foo/bar.

    In case we want to use the id while routing the request, we can use the backend. Example:

    api: Path(\"/api/:id\") -> setPath(\"/api/${id}/summary\") -> \"http://api-backend\";\npatch: Path(\"/api/*id\") -> rfcPath() -> <loopback>;\n

    In the above case, if the incoming request path is /api/foo%2Fbar, it will match the \u2018patch\u2019 route, and then the patched request will match the api route, and the api-backend service will receive a request with the path /api/foo%2Fbar/summary.

    It is also possible to enable this behavior centrally for a Skipper instance with the -rfc-patch-path flag. See URI standards interpretation.

    "},{"location":"reference/filters/#egress","title":"Egress","text":""},{"location":"reference/filters/#setrequestheaderfromsecret","title":"setRequestHeaderFromSecret","text":"

    This filter sets request header to the secret value with optional prefix and suffix. This is only for use cases using skipper as sidecar to inject tokens for the application on the egress path, if it\u2019s used in the ingress path you likely create a security issue for your application.

    This filter should be used as an egress only feature.

    Parameters:

    • header name (string)
    • secret name (string)
    • value prefix (string) - optional
    • value suffix (string) - optional

    Example:

    egress1: Method(\"GET\") -> setRequestHeaderFromSecret(\"Authorization\", \"/tmp/secrets/get-token\") -> \"https://api.example.com\";\negress2: Method(\"POST\") -> setRequestHeaderFromSecret(\"Authorization\", \"/tmp/secrets/post-token\", \"foo-\") -> \"https://api.example.com\";\negress3: Method(\"PUT\") -> setRequestHeaderFromSecret(\"X-Secret\", \"/tmp/secrets/put-token\", \"bar-\", \"-baz\") -> \"https://api.example.com\";\n

    To use setRequestHeaderFromSecret filter you need to run skipper with -credentials-paths=/tmp/secrets and specify an update interval -credentials-update-interval=10s. Files in the credentials path can be a directory, which will be able to find all files within this directory, but it won\u2019t walk subtrees. For the example case, there have to be get-token, post-token and put-token files within the specified credential paths /tmp/secrets/, resulting in /tmp/secrets/get-token, /tmp/secrets/post-token and /tmp/secrets/put-token.

    "},{"location":"reference/filters/#bearerinjector","title":"bearerinjector","text":"

    This filter injects Bearer tokens into Authorization headers read from file providing the token as content.

    It is a special form of setRequestHeaderFromSecret with \"Authorization\" header name, \"Bearer \" prefix and empty suffix.

    Example:

    egress: * -> bearerinjector(\"/tmp/secrets/my-token\") -> \"https://api.example.com\";\n\n// equivalent to setRequestHeaderFromSecret(\"Authorization\", \"/tmp/secrets/my-token\", \"Bearer \")\n
    "},{"location":"reference/filters/#open-tracing","title":"Open Tracing","text":""},{"location":"reference/filters/#tracingbaggagetotag","title":"tracingBaggageToTag","text":"

    This filter adds an opentracing tag for a given baggage item in the trace.

    Syntax:

    tracingBaggageToTag(\"<baggage_item_name>\", \"<tag_name>\")\n

    Example: If a trace consists of baggage item named foo with a value bar. Adding below filter will add a tag named baz with value bar

    tracingBaggageToTag(\"foo\", \"baz\")\n

    "},{"location":"reference/filters/#statebagtotag","title":"stateBagToTag","text":"

    This filter sets an opentracing tag from the filter context (state bag). If the provided key (first parameter) cannot be found in the state bag, then it doesn\u2019t set the tag.

    Parameters:

    • key in the state bag (string)
    • tag name (string)

    The route in the following example checks whether the request is authorized with the oauthTokeninfoAllScope() filter. This filter stores the authenticated user with \u201cauth-user\u201d key in the context, and the stateBagToTag() filter in the next step stores it in the opentracing tag \u201cclient_id\u201d:

    foo: * -> oauthTokeninfoAllScope(\"address_service.all\") -> stateBagToTag(\"auth-user\", \"client_id\") -> \"https://backend.example.org\";\n
    "},{"location":"reference/filters/#tracingtag","title":"tracingTag","text":"

    This filter adds an opentracing tag.

    Syntax:

    tracingTag(\"<tag_name>\", \"<tag_value>\")\n

    Tag value may contain template placeholders. If a template placeholder can\u2019t be resolved then filter does not set the tag.

    Example: Adding the below filter will add a tag named foo with the value bar.

    tracingTag(\"foo\", \"bar\")\n

    Example: Set tag from request header

    tracingTag(\"http.flow_id\", \"${request.header.X-Flow-Id}\")\n

    "},{"location":"reference/filters/#tracingtagfromresponse","title":"tracingTagFromResponse","text":"

    This filter works just like tracingTag, but is applied after the request was processed. In particular, template placeholders referencing the response can be used in the parameters.

    "},{"location":"reference/filters/#tracingspanname","title":"tracingSpanName","text":"

    This filter sets the name of the outgoing (client span) in opentracing. The default name is \u201cproxy\u201d. Example:

    tracingSpanName(\"api-operation\")\n
    "},{"location":"reference/filters/#load-balancing","title":"Load Balancing","text":"

    Some filters influence how load balancing will be done

    "},{"location":"reference/filters/#fadein","title":"fadeIn","text":"

    When this filter is set, and the route has a load balanced backend using a supported algorithm, then the newly added endpoints will receive the traffic in a gradually increasing way, starting from their detection for the specified duration, after which they receive an equal amount of traffic as the previously existing routes. The detection time of a load balanced backend endpoint is preserved over multiple generations of the route configuration (over route changes). This filter can be used to saturate the load of autoscaling applications that require a warm-up time and therefore a smooth ramp-up. The fade-in feature can be used together with the roundRobin, random or consistentHash LB algorithms.

    While the default fade-in curve is linear, the optional exponent parameter can be used to adjust the shape of the fade-in curve, based on the following equation:

    current_rate = proportional_rate * min((now - detected) / duration, 1) ^ exponent

    Parameters:

    • duration: duration of the fade-in in milliseconds or as a duration string
    • fade-in curve exponent - optional: a floating point number, default: 1

    Examples:

    fadeIn(\"3m\")\nfadeIn(\"3m\", 1.5)\n
    "},{"location":"reference/filters/#warning-on-fadein-and-rolling-restarts","title":"Warning on fadeIn and Rolling Restarts","text":"

    Traffic fade-in has the potential to skew the traffic to your backend pods in case of a rolling restart (kubectl rollout restart), because it is very likely that the rolling restart is going faster than the fade-in duration. The image below shows an example of a rolling restart for a four-pod deployment (A, B, C, D) into (E, F, G, H), and the traffic share of each pod over time. While the ramp-up of the new pods is ongoing, the remaining old pods will receive a largely increased traffic share (especially the last one, D in this example), as well as an over-proportional traffic share for the first pod in the rollout (E).

    To make rolling restarts safe, you need to slow them down by setting spec.minReadySeconds on the pod spec of your deployment or stackset, according to your fadeIn duration.

    "},{"location":"reference/filters/#endpointcreated","title":"endpointCreated","text":"

    This filter marks the creation time of a load balanced endpoint. When used together with the fadeIn filter, it prevents missing the detection of a new backend instance with the same hostname. This filter is typically automatically appended, and its parameters are based on external sources, e.g. the Kubernetes API.

    Parameters:

    • the address of the endpoint
    • timestamp, either as a number of seconds since the unix epoch, or a string in RFC3339 format

    Example:

    endpointCreated(\"http://10.0.0.1:8080\", \"2020-12-18T15:30:00+01:00\")\n
    "},{"location":"reference/filters/#consistenthashkey","title":"consistentHashKey","text":"

    This filter sets the request key used by the consistentHash algorithm to select the backend endpoint.

    Parameters:

    • key (string)

    The key should contain template placeholders, without placeholders the key is constant and therefore all requests would be made to the same endpoint. The algorithm will use the default key if any of the template placeholders can\u2019t be resolved.

    Examples:

    pr: Path(\"/products/:productId\")\n    -> consistentHashKey(\"${productId}\")\n    -> <consistentHash, \"http://127.0.0.1:9998\", \"http://127.0.0.1:9997\">;\n
    consistentHashKey(\"${request.header.Authorization}\")\nconsistentHashKey(\"${request.source}\") // same as the default key\n

    "},{"location":"reference/filters/#consistenthashbalancefactor","title":"consistentHashBalanceFactor","text":"

    This filter sets the balance factor used by the consistentHash algorithm to prevent a single backend endpoint from being overloaded. The number of in-flight requests for an endpoint can be no higher than (average-in-flight-requests * balanceFactor) + 1. This is helpful in the case where certain keys are very popular and threaten to overload the endpoint they are mapped to. Further Details.

    Parameters:

    • balanceFactor: A float or int, must be >= 1

    Examples:

    pr: Path(\"/products/:productId\")\n    -> consistentHashKey(\"${productId}\")\n    -> consistentHashBalanceFactor(1.25)\n    -> <consistentHash, \"http://127.0.0.1:9998\", \"http://127.0.0.1:9997\">;\n
    consistentHashBalanceFactor(3)\n

    "},{"location":"reference/plugins/","title":"Skipper plugins","text":"

    Skipper may be extended with functionality not present in the core. These additions can be built as go plugin, so they do not have to be present in the main skipper repository.

    Note the warning from Go\u2019s plugin.go:

    // The plugin support is currently incomplete, only supports Linux,\n// and has known bugs. Please report any issues.\n

    Note the known problem of using plugins together with vendoring, best described here:

    https://github.com/golang/go/issues/20481

    "},{"location":"reference/plugins/#plugin-directories","title":"Plugin directories","text":"

    Plugins are loaded from sub directories of the plugin directories. By default the plugin directory is set to ./plugins (i.e. relative to skipper\u2019s working directory). An additional directory may be given with the -plugindir=/path/to/dir option to skipper.

    Any file with the suffix .so found below the plugin directories (also in sub directories) will be loaded without any arguments. When a plugin needs an argument, this must be explicitly loaded and the arguments passed, e.g. with -filter-plugin geoip,db=/path/to/db.

    "},{"location":"reference/plugins/#building-a-plugin","title":"Building a plugin","text":"

    Each plugin should be built with Go version >= 1.11, enabled Go modules support similar to the following build command line:

    go build -buildmode=plugin -o example.so example.go\n

    There are some pitfalls:

    • packages which are shared between skipper and the plugin must not be in a vendor/ directory, otherwise the plugin will fail to load or in some cases give wrong results (e.g. an opentracing span cannot be found in the context even if it is present). This also means: Do not vendor skipper in a plugin repo\u2026
    • plugins must be rebuilt when skipper is rebuilt
    • do not attempt to rebuild a module and copy it over a loaded plugin, that will crash skipper immediately\u2026
    "},{"location":"reference/plugins/#use-a-plugin","title":"Use a plugin","text":"

    In this example we use a geoip database, that you need to find and download. We expect that you did a git clone git@github.com:zalando/skipper.git and entered the directory.

    Build skipper:

    % make skipper\n

    Install filter plugins:

    % mkdir plugins\n% git clone git@github.com:skipper-plugins/filters.git plugins/filters\n% ls plugins/filters\ngeoip/  glide.lock  glide.yaml  ldapauth/  Makefile  noop/  plugin_test.go\n% cd plugins/filters/geoip\n% go build -buildmode=plugin -o geoip.so geoip.go\n% cd -\n~/go/src/github.com/zalando/skipper\n

    Start a pseudo backend that shows all headers in plain:

    % nc -l 9000\n

    Run the proxy with geoip database:

    % ./bin/skipper -filter-plugin geoip,db=$HOME/Downloads/GeoLite2-City_20181127/GeoLite2-City.mmdb -inline-routes '* -> geoip() -> \"http://127.0.0.1:9000\"'\n[APP]INFO[0000] found plugin geoip at plugins/filters/geoip/geoip.so\n[APP]INFO[0000] loaded plugin geoip (geoip) from plugins/filters/geoip/geoip.so\n[APP]INFO[0000] attempting to load plugin from plugins/filters/geoip/geoip.so\n[APP]INFO[0000] plugin geoip already loaded with InitFilter\n[APP]INFO[0000] Expose metrics in codahale format\n[APP]INFO[0000] support listener on :9911\n[APP]INFO[0000] proxy listener on :9090\n[APP]INFO[0000] route settings, reset, route: : * -> geoip() -> \"http://127.0.0.1:9000\"\n[APP]INFO[0000] certPathTLS or keyPathTLS not found, defaulting to HTTP\n[APP]INFO[0000] route settings received\n[APP]INFO[0000] route settings applied\n

    Or passing a yaml file via config-file flag:

    inline-routes: '* -> geoip() -> \"http://127.0.0.1:9000\"'\nfilter-plugin:\n  geoip:\n    - db=$HOME/Downloads/GeoLite2-City_20181127/GeoLite2-City.mmdb\n

    Use a client to lookup geoip:

    % curl -H\"X-Forwarded-For: 107.12.53.5\" localhost:9090/\n^C\n

    pseudo backend should show X-Geoip-Country header:

    # nc -l 9000\nGET / HTTP/1.1\nHost: 127.0.0.1:9000\nUser-Agent: curl/7.49.0\nAccept: */*\nX-Forwarded-For: 107.12.53.5\nX-Geoip-Country: US\nAccept-Encoding: gzip\n^C\n

    skipper should show additional log lines, because of the CTRL-C:

    [APP]ERRO[0082] error while proxying, route  with backend http://127.0.0.1:9000, status code 500: dialing failed false: EOF\n107.12.53.5 - - [28/Nov/2018:14:39:40 +0100] \"GET / HTTP/1.1\" 500 22 \"-\" \"curl/7.49.0\" 2753 localhost:9090 - -\n
    "},{"location":"reference/plugins/#filter-plugins","title":"Filter plugins","text":"

    All plugins must have a function named InitFilter with the following signature

    func([]string) (filters.Spec, error)\n

    The parameters passed are all arguments for the plugin, i.e. everything after the first word from skipper\u2019s -filter-plugin parameter. E.g. when the -filter-plugin parameter is

    myfilter,datafile=/path/to/file,foo=bar\n

    the myfilter plugin will receive

    []string{\"datafile=/path/to/file\", \"foo=bar\"}\n

    as arguments.

    The filter plugin implementation is responsible for parsing the received arguments.

    Filter plugins can be found in the filter repo

    "},{"location":"reference/plugins/#example-filter-plugin","title":"Example filter plugin","text":"

    An example noop plugin looks like

    package main\n\nimport (\n    \"github.com/zalando/skipper/filters\"\n)\n\ntype noopSpec struct{}\n\nfunc InitFilter(opts []string) (filters.Spec, error) {\n    return noopSpec{}, nil\n}\n\nfunc (s noopSpec) Name() string {\n    return \"noop\"\n}\nfunc (s noopSpec) CreateFilter(config []interface{}) (filters.Filter, error) {\n    return noopFilter{}, nil\n}\n\ntype noopFilter struct{}\n\nfunc (f noopFilter) Request(filters.FilterContext)  {}\nfunc (f noopFilter) Response(filters.FilterContext) {}\n
    "},{"location":"reference/plugins/#predicate-plugins","title":"Predicate plugins","text":"

    All plugins must have a function named InitPredicate with the following signature

    func([]string) (routing.PredicateSpec, error)\n

    The parameters passed are all arguments for the plugin, i.e. everything after the first word from skipper\u2019s -predicate-plugin parameter. E.g. when the -predicate-plugin parameter is

    mypred,datafile=/path/to/file,foo=bar\n

    the mypred plugin will receive

    []string{\"datafile=/path/to/file\", \"foo=bar\"}\n

    as arguments.

    The predicate plugin implementation is responsible for parsing the received arguments.

    Predicate plugins can be found in the predicate repo

    "},{"location":"reference/plugins/#example-predicate-plugin","title":"Example predicate plugin","text":"

    An example MatchAll plugin looks like

    package main\n\nimport (\n    \"github.com/zalando/skipper/routing\"\n    \"net/http\"\n)\n\ntype noopSpec struct{}\n\nfunc InitPredicate(opts []string) (routing.PredicateSpec, error) {\n    return noopSpec{}, nil\n}\n\nfunc (s noopSpec) Name() string {\n    return \"MatchAll\"\n}\nfunc (s noopSpec) Create(config []interface{}) (routing.Predicate, error) {\n    return noopPredicate{}, nil\n}\n\ntype noopPredicate struct{}\n\nfunc (p noopPredicate) Match(*http.Request) bool {\n    return true\n}\n
    "},{"location":"reference/plugins/#dataclient-plugins","title":"DataClient plugins","text":"

    Similar to the above predicate and filter plugins. The command line option for data client plugins is -dataclient-plugin. The module must have a InitDataClient function with the signature

    func([]string) (routing.DataClient, error)\n

    A noop data client looks like

    package main\n\nimport (\n    \"github.com/zalando/skipper/eskip\"\n    \"github.com/zalando/skipper/routing\"\n)\n\nfunc InitDataClient([]string) (routing.DataClient, error) {\n    var dc DataClient = \"\"\n    return dc, nil\n}\n\ntype DataClient string\n\nfunc (dc DataClient) LoadAll() ([]*eskip.Route, error) {\n    return eskip.Parse(string(dc))\n}\n\nfunc (dc DataClient) LoadUpdate() ([]*eskip.Route, []string, error) {\n    return nil, nil, nil\n}\n
    "},{"location":"reference/plugins/#multitype-plugins","title":"MultiType plugins","text":"

    Sometimes it is necessary to combine multiple plugin types into one module. This can be done with this kind of plugin. Note that these modules are not auto loaded, these need an explicit -multi-plugin name,arg1,arg2 command line switch for skipper.

    The module must have a InitPlugin function with the signature

    func([]string) ([]filters.Spec, []routing.PredicateSpec, []routing.DataClient, error)\n

    Any of the returned types may be nil, so you can have e.g. a combined filter / data client plugin or share a filter and a predicate, e.g. like

    package main\n\nimport (\n    \"fmt\"\n    \"net\"\n    \"net/http\"\n    \"strconv\"\n    \"strings\"\n\n    ot \"github.com/opentracing/opentracing-go\"\n    maxminddb \"github.com/oschwald/maxminddb-golang\"\n\n    \"github.com/zalando/skipper/filters\"\n    snet \"github.com/zalando/skipper/net\"\n    \"github.com/zalando/skipper/predicates\"\n    \"github.com/zalando/skipper/routing\"\n)\n\ntype geoipSpec struct {\n    db   *maxminddb.Reader\n    name string\n}\n\nfunc InitPlugin(opts []string) ([]filters.Spec, []routing.PredicateSpec, []routing.DataClient, error) {\n    var db string\n    for _, o := range opts {\n        switch {\n        case strings.HasPrefix(o, \"db=\"):\n            db = o[3:]\n        }\n    }\n    if db == \"\" {\n        return nil, nil, nil, fmt.Errorf(\"missing db= parameter for geoip plugin\")\n    }\n    reader, err := maxminddb.Open(db)\n    if err != nil {\n        return nil, nil, nil, fmt.Errorf(\"failed to open db %s: %s\", db, err)\n    }\n\n    return []filters.Spec{&geoipSpec{db: reader, name: \"geoip\"}},\n        []routing.PredicateSpec{&geoipSpec{db: reader, name: \"GeoIP\"}},\n        nil,\n        nil\n}\n\nfunc (s *geoipSpec) Name() string {\n    return s.name\n}\n\nfunc (s *geoipSpec) CreateFilter(config []interface{}) (filters.Filter, error) {\n    var fromLast bool\n    header := \"X-GeoIP-Country\"\n    var err error\n    for _, c := range config {\n        if s, ok := c.(string); ok {\n            switch {\n            case strings.HasPrefix(s, \"from_last=\"):\n                fromLast, err = strconv.ParseBool(s[10:])\n                if err != nil {\n                    return nil, filters.ErrInvalidFilterParameters\n                }\n            case strings.HasPrefix(s, \"header=\"):\n                header = s[7:]\n            }\n        }\n    }\n    return &geoip{db: s.db, fromLast: fromLast, header: header}, nil\n}\n\nfunc (s *geoipSpec) Create(config []interface{}) (routing.Predicate, error) {\n    
var fromLast bool\n    var err error\n    countries := make(map[string]struct{})\n    for _, c := range config {\n        if s, ok := c.(string); ok {\n            switch {\n            case strings.HasPrefix(s, \"from_last=\"):\n                fromLast, err = strconv.ParseBool(s[10:])\n                if err != nil {\n                    return nil, predicates.ErrInvalidPredicateParameters\n                }\n            default:\n                countries[strings.ToUpper(s)] = struct{}{}\n            }\n        }\n    }\n    return &geoip{db: s.db, fromLast: fromLast, countries: countries}, nil\n}\n\ntype geoip struct {\n    db        *maxminddb.Reader\n    fromLast  bool\n    header    string\n    countries map[string]struct{}\n}\n\ntype countryRecord struct {\n    Country struct {\n        ISOCode string `maxminddb:\"iso_code\"`\n    } `maxminddb:\"country\"`\n}\n\nfunc (g *geoip) lookup(r *http.Request) string {\n    var src net.IP\n    if g.fromLast {\n        src = snet.RemoteHostFromLast(r)\n    } else {\n        src = snet.RemoteHost(r)\n    }\n\n    record := countryRecord{}\n    err := g.db.Lookup(src, &record)\n    if err != nil {\n        fmt.Printf(\"geoip(): failed to lookup %s: %s\", src, err)\n    }\n    if record.Country.ISOCode == \"\" {\n        return \"UNKNOWN\"\n    }\n    return record.Country.ISOCode\n}\n\nfunc (g *geoip) Request(c filters.FilterContext) {\n    c.Request().Header.Set(g.header, g.lookup(c.Request()))\n}\n\nfunc (g *geoip) Response(c filters.FilterContext) {}\n\nfunc (g *geoip) Match(r *http.Request) bool {\n    span := ot.SpanFromContext(r.Context())\n    if span != nil {\n        span.LogKV(\"GeoIP\", \"start\")\n    }\n\n    code := g.lookup(r)\n    _, ok := g.countries[code]\n\n    if span != nil {\n        span.LogKV(\"GeoIP\", code)\n    }\n    return ok\n}\n
    "},{"location":"reference/plugins/#opentracing-plugins","title":"OpenTracing plugins","text":"

    The tracers, except for noop, are built as Go Plugins. A tracing plugin can be loaded with -opentracing NAME as parameter to skipper.

    Implementations of OpenTracing API can be found in the https://github.com/skipper-plugins/opentracing repository.

    All plugins must have a function named InitTracer with the following signature

    func([]string) (opentracing.Tracer, error)\n

    The parameters passed are all arguments for the plugin, i.e. everything after the first word from skipper\u2019s -opentracing parameter. E.g. when the -opentracing parameter is mytracer foo=bar token=xxx somename=bla:3 the \u201cmytracer\u201d plugin will receive

    []string{\"foo=bar\", \"token=xxx\", \"somename=bla:3\"}\n

    as arguments.

    The tracer plugin implementation is responsible to parse the received arguments.

    An example plugin looks like

    package main\n\nimport (\n     basic \"github.com/opentracing/basictracer-go\"\n     opentracing \"github.com/opentracing/opentracing-go\"\n)\n\nfunc InitTracer(opts []string) (opentracing.Tracer, error) {\n     return basic.NewTracerWithOptions(basic.Options{\n         Recorder:       basic.NewInMemoryRecorder(),\n         ShouldSample:   func(traceID uint64) bool { return traceID%64 == 0 },\n         MaxLogsPerSpan: 25,\n     }), nil\n}\n

    "},{"location":"reference/predicates/","title":"Skipper Predicates","text":"

    Predicates are used to decide which route will handle an incoming request. Routes can contain multiple predicates. A request will match a route only if all the predicates of the route match. See the description of the route matching mechanism here: Route matching.

    Example route with a Host, Method and Path match predicates and a backend:

    all: Host(/^my-host-header\\.example\\.org$/) && Method(\"GET\") && Path(\"/hello\") -> \"http://127.0.0.1:1234/\";\n
    "},{"location":"reference/predicates/#predicate-arguments","title":"Predicate arguments","text":"

    The predicate arguments can be strings, regular expressions or numbers (float64, int). In the eskip syntax representation:

    • strings are surrounded by double quotes (\"). When necessary, characters can be escaped by \\, e.g. \\\\ or \\\".
    • regular expressions are a re2 regular expression, surrounded by
    • /, e.g. /^www\\.example\\.org(:\\d+)?$/. When a predicate expects a regular expression as an argument, the string representation with double quotes can be used, as well.
    • numbers are regular (decimal) numbers like 401 or 1.23456. The eskip syntax doesn\u2019t define a limitation on the size of the numbers, but the underlying implementation currently relies on the float64 values of the Go runtime.

    Other higher level argument types must be represented as one of the above types. E.g. it is a convention to represent time duration values as strings, parseable by time.Duration.

    "},{"location":"reference/predicates/#the-path-tree","title":"The path tree","text":"

    There is an important difference between the evaluation of the Path or PathSubtree predicates, and the evaluation of all the other predicates (PathRegexp belonging to the second group). Find an explanation in the Route matching section.

    "},{"location":"reference/predicates/#path","title":"Path","text":"

    The path predicate is used to match the path in HTTP request line. It accepts a single argument, that can be a fixed path like \u201c/some/path\u201d, or it can contain wildcards. There can be only zero or one path predicate in a route.

    Wildcards:

    Wildcards can be put in place of one or more path segments in the path, e.g. \u201c/some/:dir/:name\u201d, or the path can end with a free wildcard like \"/some/path/*param\", where the free wildcard can match against a sub-path with multiple segments. Note, that this solution implicitly supports the glob standard, e.g. \"/some/path/**\" will work as expected. The wildcards must follow a /.

    The arguments are available to the filters while processing the matched requests, but currently only a few built-in filters utilize them, and they can be used rather only from custom filter extensions.

    Known bug:

    There is a known bug with how predicates of the form Path(\"/foo/*\") are currently handled. Note the wildcard defined with * doesn\u2019t have a name here. Wildcards must have a name, but Skipper currently does not reject these routes, resulting in undefined behavior.

    Trailing slash:

    By default, Path(\"/foo\") and Path(\"/foo/\") are not equivalent. Ignoring the trailing slash can be toggled with the -ignore-trailing-slash command line flag.

    Examples:

    Path(\"/foo/bar\")     //   /foo/bar\nPath(\"/foo/bar/\")    //   /foo/bar/, unless started with -ignore-trailing-slash\nPath(\"/foo/:id\")     //   /foo/_anything\nPath(\"/foo/:id/baz\") //   /foo/_anything/baz\nPath(\"/foo/*rest\")   //   /foo/bar/baz\nPath(\"/foo/**\")      //   /foo/bar/baz\n
    "},{"location":"reference/predicates/#pathsubtree","title":"PathSubtree","text":"

    The path subtree predicate behaves similar to the path predicate, but it matches the exact path in the definition and any sub path below it. The subpath is automatically provided among the path parameters with the name *. If a free wildcard is appended to the definition, e.g. PathSubtree(\"/some/path/*rest\"), the free wildcard name is used instead of *. The simple wildcards behave similar to the Path predicate. The main difference between PathSubtree(\"/foo\") and Path(\"/foo/**\") is that the PathSubtree predicate always ignores the trailing slashes.

    Examples:

    PathSubtree(\"/foo/bar\")\nPathSubtree(\"/\")\nPathSubtree(\"/foo/*rest\")\n
    "},{"location":"reference/predicates/#pathregexp","title":"PathRegexp","text":"

    Regular expressions to match the path. It uses Go\u2019s standard library regexp package to match, which is based on re2 regular expression syntax.

    Parameters:

    • PathRegexp (regex)

    A route can contain more than one PathRegexp predicates. It can be also used in combination with the Path predicate.

    Path(\"/colors/:name/rgb-value\") && PathRegexp(\"^/colors/(red|green|blue|cyan|magenta|pink|yellow)/\")\n-> returnRGB()\n-> <shunt>\n

    Further examples:

    PathRegexp(\"^/foo/bar\")\nPathRegexp(\"/foo/bar$\")\nPathRegexp(\"/foo/bar/\")\nPathRegexp(\"^/foo/(bar|qux)\")\n
    "},{"location":"reference/predicates/#host","title":"Host","text":"

    Regular expressions that the host header in the request must match.

    Parameters:

    • Host (regex)

    Examples:

    Host(/^my-host-header\\.example\\.org$/)\nHost(/header\\.example\\.org$/)\n
    "},{"location":"reference/predicates/#hostany","title":"HostAny","text":"

    Evaluates to true if the request host exactly equals any of the configured hostnames.

    Parameters:

    • hostnames (string)

    Examples:

    HostAny(\"www.example.org\", \"www.example.com\")\nHostAny(\"localhost:9090\")\n
    "},{"location":"reference/predicates/#forwarded-header-predicates","title":"Forwarded header predicates","text":"

    Uses standardized Forwarded header (RFC 7239)

    More info about the header: MDN

    If multiple proxies chain values in the header, as a comma separated list, the predicates below will only match the last value in the chain for each part of the header.

    Example: Forwarded: host=example.com;proto=https, host=example.org

    • ForwardedHost(/^example\\.com$/) - does not match
    • ForwardedHost(/^example\\.org$/) - matches
    • ForwardedHost(/^example\\.org$/) && ForwardedProto(\"https\") - matches
    • ForwardedHost(/^example\\.com$/) && ForwardedProto(\"https\") - does not match
    "},{"location":"reference/predicates/#forwardedhost","title":"ForwardedHost","text":"

    Regular expressions that the forwarded host header in the request must match.

    Parameters:

    • Host (regex)

    Examples:

    ForwardedHost(/^my-host-header\\.example\\.org$/)\nForwardedHost(/header\\.example\\.org$/)\n
    "},{"location":"reference/predicates/#forwardedprotocol","title":"ForwardedProtocol","text":"

    Protocol the forwarded header in the request must match.

    Parameters:

    • Protocol (string)

    Only \u201chttp\u201d and \u201chttps\u201d values are allowed

    Examples:

    ForwardedProtocol(\"http\")\nForwardedProtocol(\"https\")\n
    "},{"location":"reference/predicates/#weight","title":"Weight","text":"

    By default, the weight (priority) of a route is determined by the number of defined predicates.

    If you want to give a route more priority, you can give it more weight.

    Parameters:

    • Weight (int)

    Example where route2 has more priority because it has more predicates:

    route1: Path(\"/test\") -> \"http://www.zalando.de\";\nroute2: Path(\"/test\") && True() -> \"http://www.zalando.de\";\n

    Example where route1 has more priority because it has more weight:

    route1: Path(\"/test\") && Weight(100) -> \"http://www.zalando.de\";\nroute2: Path(\"/test\") && True() && True() -> \"http://www.zalando.de\";\n
    "},{"location":"reference/predicates/#true","title":"True","text":"

    Always matches. Before the Weight predicate existed, this was used to give a route more weight.

    Example where route2 has more weight.

    route1: Path(\"/test\") -> \"http://www.zalando.de\";\nroute2: Path(\"/test\") && True() -> \"http://www.github.com\";\n
    "},{"location":"reference/predicates/#false","title":"False","text":"

    Never matches. Can be used to disable certain routes.

    Example where route2 is disabled.

    route1: Path(\"/test\") -> \"http://www.zalando.de\";\nroute2: Path(\"/test\") && False() -> \"http://www.github.com\";\n
    "},{"location":"reference/predicates/#shutdown","title":"Shutdown","text":"

    Evaluates to true if Skipper is shutting down. Can be used to create customized healthcheck.

    health_up: Path(\"/health\") -> inlineContent(\"OK\") -> <shunt>;\nhealth_down: Path(\"/health\") && Shutdown() -> status(503) -> inlineContent(\"shutdown\") -> <shunt>;\n
    "},{"location":"reference/predicates/#method","title":"Method","text":"

    The HTTP method that the request must match. HTTP methods are one of GET, HEAD, PATCH, POST, PUT, DELETE, OPTIONS, CONNECT, TRACE.

    Parameters:

    • Method (string)

    Examples:

    Method(\"GET\")\nMethod(\"OPTIONS\")\n
    "},{"location":"reference/predicates/#methods","title":"Methods","text":"

    The HTTP method that the request must match. HTTP methods are one of GET, HEAD, PATCH, POST, PUT, DELETE, OPTIONS, CONNECT, TRACE.

    Parameters:

    • Method (\u2026string) methods names

    Examples:

    Methods(\"GET\")\nMethods(\"OPTIONS\", \"POST\")\nMethods(\"OPTIONS\", \"POST\", \"patch\")\n
    "},{"location":"reference/predicates/#header","title":"Header","text":"

    A header key and exact value that must be present in the request. Note that Header(\u201cKey\u201d, \u201cValue\u201d) is equivalent to HeaderRegexp(\u201cKey\u201d, \u201c^Value$\u201d).

    Parameters:

    • Header (string, string)

    Examples:

    Header(\"X-Forwarded-For\", \"192.168.0.2\")\nHeader(\"Accept\", \"application/json\")\n
    "},{"location":"reference/predicates/#headerregexp","title":"HeaderRegexp","text":"

    A header key and a regular expression, where the key must be present in the request and one of the associated values must match the expression.

    Parameters:

    • HeaderRegexp (string, regex)

    Examples:

    HeaderRegexp(\"X-Forwarded-For\", \"^192\\.168\\.0\\.[0-2]?[0-9]?[0-9] \")\nHeaderRegexp(\"Accept\", \"application/(json|xml)\")\n
    "},{"location":"reference/predicates/#cookie","title":"Cookie","text":"

    Matches if the specified cookie is set in the request.

    Parameters:

    • Cookie (string, regex) name and value match

    Examples:

    Cookie(\"alpha\", /^enabled$/)\n
    "},{"location":"reference/predicates/#auth","title":"Auth","text":"

    Authorization header based match.

    "},{"location":"reference/predicates/#jwtpayloadanykv","title":"JWTPayloadAnyKV","text":"

    Match the route if at least one of the base64 decoded JWT content matches the key value configuration.

    Parameters:

    • Key-Value pairs (\u2026string), odd index is the key of the JWT content and even index is the value of the JWT content

    Examples:

    JWTPayloadAnyKV(\"iss\", \"https://accounts.google.com\")\nJWTPayloadAnyKV(\"iss\", \"https://accounts.google.com\", \"email\", \"skipper-router@googlegroups.com\")\n
    "},{"location":"reference/predicates/#jwtpayloadallkv","title":"JWTPayloadAllKV","text":"

    Match the route if all of the base64 decoded JWT content matches the key value configuration.

    Parameters:

    • Key-Value pairs (\u2026string), odd index is the key of the JWT content and even index is the value of the JWT content

    Examples:

    JWTPayloadAllKV(\"iss\", \"https://accounts.google.com\")\nJWTPayloadAllKV(\"iss\", \"https://accounts.google.com\", \"email\", \"skipper-router@googlegroups.com\")\n
    "},{"location":"reference/predicates/#jwtpayloadanykvregexp-jwtpayloadallkvregexp","title":"JWTPayloadAnyKVRegexp, JWTPayloadAllKVRegexp","text":"

    Behaves exactly the same as JWTPayloadAnyKV, JWTPayloadAllKV, but the expected values are regular expressions that will be matched against the JWT value.

    Examples:

    JWTPayloadAllKVRegexp(\"iss\", \"^https://\")\nJWTPayloadAnyKVRegexp(\"iss\", \"^https://\")\n
    "},{"location":"reference/predicates/#headersha256","title":"HeaderSHA256","text":"

    Matches if SHA-256 hash of the header value (known as pre-shared key or secret) equals to any of the configured hash values. Several hash values could be used to match multiple secrets e.g. during secret rotation.

    Hash values only hide secrets from parties that have access to the source of Skipper routes. Authentication strength depends on the strength of the secret value so e.g. HeaderSHA256(\"X-Secret\", \"2bb80d537b1da3e38bd30361aa855686bde0eacd7162fef6a25fe97bf527a25b\") is not stronger than just Header(\"X-Secret\", \"secret\").

    The secret value must be kept secret, must be used by a single client and must be rotated periodically. See below how to generate random secret value using OpenSSL.

    Parameters:

    • header name (string)
    • one or more hex-encoded SHA-256 hashes of the matching header values (string)

    Secure secret value example:

    #\n# 1. Generate cryptographically secure pseudo random secret header value:\n# - length of at least 32 bytes (the size of the SHA-256 output)\n# - encode as -base64 or -hex to get ASCII text value\n#\nSECRET=$(openssl rand -base64 32)\necho $SECRET\n3YchPsliGjBXvyl/ncLWEI8/loKGrj/VNM4garxWEmA=\n\n#\n# 2. Get SHA-256 hash of the secret header value to use as HeaderSHA256 argument:\n# - use echo -n to not output the trailing newline\n#\necho -n $SECRET | sha256sum\na6131ba920df753c8109500cc11818f7192336d06532f6fa13009c2e4f6e1841  -\n
    // 3. Configure route to match hash of the secret value\nHeaderSHA256(\n    \"X-Secret\",\n    \"a6131ba920df753c8109500cc11818f7192336d06532f6fa13009c2e4f6e1841\"\n) -> inlineContent(\"ok\\n\") -> <shunt>\n
    # 4. Test secret value\ncurl -H \"X-Secret: $SECRET\" http://localhost:9090\n

    Secret rotation example:

    // To rotate secret:\n// * add new secret - both old and new secrets match during rotation\n// * update client to use new secret\n// * remove old secret\nHeaderSHA256(\n    \"X-Secret\",\n    \"cba06b5736faf67e54b07b561eae94395e774c517a7d910a54369e1263ccfbd4\", // SHA256(\"old\")\n    \"11507a0e2f5e69d5dfa40a62a1bd7b6ee57e6bcd85c67c9b8431b36fff21c437\"  // SHA256(\"new\")\n) -> inlineContent(\"ok\\n\") -> <shunt>\n

    Basic access authentication example:

    anon: * -> setResponseHeader(\"WWW-Authenticate\", `Basic realm=\"foo\", charset=\"UTF-8\"`) -> status(401) -> <shunt>;\nauth: HeaderSHA256(\n    \"Authorization\",\n    \"caae07e42ed8d231a58edcde95782b0feb67186172c18c89894ce4c2174df137\", // SHA256(\"Basic \" + BASE64(\"test:123\u00a3\"))\n    \"157da8472590f0ce0a7c651bd79aecb5cc582944fcf76cbabada915d333deee8\"  // SHA256(\"Basic \" + BASE64(\"Aladdin:open sesame\"))\n) -> inlineContent(\"ok\\n\") -> <shunt>;\n

    "},{"location":"reference/predicates/#interval","title":"Interval","text":"

    An interval implements custom predicates to match routes only during some period of time.

    There are three predicates: Between, Before and After. All predicates can be created using a date represented as: a string in RFC3339 format (see https://golang.org/pkg/time/#pkg-constants); a string in RFC3339 format without a numeric timezone offset, together with a location name corresponding to a file in the IANA Time Zone database; or an int64 or float64 number corresponding to the given Unix time in seconds since January 1, 1970 UTC. A float64 number will be converted into an int64 number.

    "},{"location":"reference/predicates/#after","title":"After","text":"

    Matches if the request is after the specified time

    Parameters:

    • After (string) RFC3339 datetime string
    • After (string, string) RFC3339 datetime string without timezone offset, location name
    • After (int) unixtime in seconds

    Examples:

    After(\"2016-01-01T12:00:00+02:00\")\nAfter(\"2021-02-18T00:00:00\", \"Europe/Berlin\")\nAfter(1451642400)\n
    "},{"location":"reference/predicates/#before","title":"Before","text":"

    Matches if the request is before the specified time

    Parameters:

    • Before (string) RFC3339 datetime string
    • Before (string, string) RFC3339 datetime string without timezone offset, location name
    • Before (int) unixtime in seconds

    Examples:

    Before(\"2016-01-01T12:00:00+02:00\")\nBefore(\"2021-02-18T00:00:00\", \"Europe/Berlin\")\nBefore(1451642400)\n
    "},{"location":"reference/predicates/#between","title":"Between","text":"

    Matches if the request is between the specified timeframe

    Parameters:

    • Between (string, string) RFC3339 datetime string, from - till
    • Between (string, string, string) RFC3339 datetime string without timezone offset, from - till and a location name
    • Between (int, int) unixtime in seconds, from - till

    Examples:

    Between(\"2016-01-01T12:00:00+02:00\", \"2016-02-01T12:00:00+02:00\")\nBetween(\"2021-02-18T00:00:00\", \"2021-02-18T01:00:00\", \"Europe/Berlin\")\nBetween(1451642400, 1454320800)\n
    "},{"location":"reference/predicates/#cron","title":"Cron","text":"

    Matches routes when the given cron-like expression matches the system time.

    Parameters:

    • Cron-like expression. See the package documentation for supported & unsupported features. Expressions are expected to be in the same time zone as the system that generates the time.Time instances.

    Examples:

    // match everything\nCron(\"* * * * *\")\n// match only when the hour is between 5-7 (inclusive)\nCron(\"* 5-7, * * *\")\n// match only when the hour is between 5-7, equal to 8, or between 12-15\nCron(\"* 5-7,8,12-15 * * *\")\n// match only when it is weekdays\nCron(\"* * * * 1-5\")\n// match only when it is weekdays & working hours\nCron(\"* 7-18 * * 1-5\")\n
    "},{"location":"reference/predicates/#queryparam","title":"QueryParam","text":"

    Match request based on the Query Params in URL

    Parameters:

    • QueryParam (string) name
    • QueryParam (string, regex) name and value match

    Examples:

    // matches http://example.org?bb=a&query=withvalue\nQueryParam(\"query\")\n\n// Even a query param without a value\n// matches http://example.org?bb=a&query=\nQueryParam(\"query\")\n\n// matches with regexp\n// matches http://example.org?bb=a&query=example\nQueryParam(\"query\", \"^example$\")\n\n// matches with regexp and multiple values of query param\n// matches http://example.org?bb=a&query=testing&query=example\nQueryParam(\"query\", \"^example$\")\n
    "},{"location":"reference/predicates/#source","title":"Source","text":"

    Source implements a custom predicate to match routes based on the source IP or X-Forwarded-For header of a request.

    Parameters:

    • Source (string, ..) varargs with IPs or CIDR

    Examples:

    // only match requests from 1.2.3.4\nSource(\"1.2.3.4\")\n\n// only match requests from 1.2.3.0 - 1.2.3.255\nSource(\"1.2.3.0/24\")\n\n// only match requests from 1.2.3.4 and the 2.2.2.0/24 network\nSource(\"1.2.3.4\", \"2.2.2.0/24\")\n
    "},{"location":"reference/predicates/#sourcefromlast","title":"SourceFromLast","text":"

    The same as Source, but uses the last part of the X-Forwarded-For header to match the network. This is primarily useful with the popular load balancers from AWS, ELB and ALB, because they put the client IP as the last part of the X-Forwarded-For header.

    Parameters:

    • SourceFromLast (string, ..) varargs with IPs or CIDR

    Examples:

    SourceFromLast(\"1.2.3.4\", \"2.2.2.0/24\")\n
    "},{"location":"reference/predicates/#clientip","title":"ClientIP","text":"

    ClientIP implements a custom predicate to match routes based on the client IP of a request.

    Parameters:

    • ClientIP (string, ..) varargs with IPs or CIDR

    Examples:

    // only match requests from 1.2.3.4\nClientIP(\"1.2.3.4\")\n\n// only match requests from 1.2.3.0 - 1.2.3.255\nClientIP(\"1.2.3.0/24\")\n\n// only match requests from 1.2.3.4 and the 2.2.2.0/24 network\nClientIP(\"1.2.3.4\", \"2.2.2.0/24\")\n
    "},{"location":"reference/predicates/#tee","title":"Tee","text":"

    The Tee predicate matches a route when a request is spawn from the teeLoopback filter as a tee request, using the same provided label.

    Parameters:

    • tee label (string): the predicate will match only those requests that were spawn from a teeLoopback filter using the same label.

    See also:

    • teeLoopback filter
    • Shadow Traffic Tutorial
    "},{"location":"reference/predicates/#traffic","title":"Traffic","text":"

    Traffic implements a predicate to control the matching probability for a given route by setting its weight.

    The probability for matching a route is defined by the mandatory first parameter, that must be a decimal number between 0.0 and 1.0 (both inclusive).

    The optional second argument is used to specify the cookie name for the traffic group, in case you want to use stickiness. Stickiness allows all subsequent requests from the same client to match the same route. Stickiness of traffic is supported by the optional third parameter, indicating whether the request being matched belongs to the traffic group of the current route. If yes, the predicate matches ignoring the chance argument.

    Parameters:

    • Traffic (decimal) valid values [0.0, 1.0]
    • Traffic (decimal, string, string) session stickiness

    Examples:

    non-sticky:

    // hit by 10% percent chance\nv2:\n    Traffic(.1) ->\n    \"https://api-test-green\";\n\n// hit by remaining chance\nv1:\n    * ->\n    \"https://api-test-blue\";\n

    stickiness:

    // hit by 5% percent chance\ncartTest:\n    Traffic(.05, \"cart-test\", \"test\") && Path(\"/cart\") ->\n    responseCookie(\"cart-test\", \"test\") ->\n    \"https://cart-test\";\n\n// hit by remaining chance\ncart:\n    Path(\"/cart\") ->\n    responseCookie(\"cart-test\", \"default\") ->\n    \"https://cart\";\n\n// hit by 15% percent chance\ncatalogTestA:\n    Traffic(.15, \"catalog-test\", \"A\") ->\n    responseCookie(\"catalog-test\", \"A\") ->\n    \"https://catalog-test-a\";\n\n// hit by 30% percent chance\ncatalogTestB:\n    Traffic(.3, \"catalog-test\", \"B\") ->\n    responseCookie(\"catalog-test\", \"B\") ->\n    \"https://catalog-test-b\";\n\n// hit by remaining chance\ncatalog:\n    * ->\n    responseCookie(\"catalog-test\", \"default\") ->\n    \"https://catalog\";\n
    "},{"location":"reference/predicates/#trafficsegment","title":"TrafficSegment","text":"

    TrafficSegment predicate requires two number arguments \\(min\\) and \\(max\\) from an interval \\([0, 1]\\) (from zero included to one included) and \\(min <= max\\).

    Let \\(r\\) be one-per-request uniform random number value from \\([0, 1)\\). TrafficSegment matches if \\(r\\) belongs to an interval from \\([min, max)\\). Upper interval boundary \\(max\\) is excluded to simplify definition of adjacent intervals - the upper boundary of the first interval then equals lower boundary of the next and so on, e.g. \\([0, 0.25)\\) and \\([0.25, 1)\\).

    This predicate has weight of -1 and therefore does not affect route weight.

    Parameters:

    • min (decimal) from an interval [0, 1]
    • max (decimal) from an interval [0, 1], min <= max

    Example of routes splitting traffic in 50%+30%+20% proportion:

    r50: Path(\"/test\") && TrafficSegment(0.0, 0.5) -> <shunt>;\nr30: Path(\"/test\") && TrafficSegment(0.5, 0.8) -> <shunt>;\nr20: Path(\"/test\") && TrafficSegment(0.8, 1.0) -> <shunt>;\n
    "},{"location":"reference/predicates/#contentlengthbetween","title":"ContentLengthBetween","text":"

    The ContentLengthBetween predicate matches a route when a request content length header value is between min and max provided values. In case the client does not specify the content length value then the predicate will not match.

    Parameters:

    • min (int): the lower bound (inclusive) for the content length check. The value must be greater than or equal to 0.
    • max (int): the upper bound (exclusive) for the content length check. The value must be greater than min.

    Examples:

    // matches the range from 0 to 999\nContentLengthBetween(0, 1000)\n\n// matches the range from 1000 to 9999\nContentLengthBetween(1000, 10000)\n
    "},{"location":"reference/scripts/","title":"Lua filter scripts","text":"

    LUA scripts can be used as filters in skipper. The current implementation supports Lua 5.1.

    "},{"location":"reference/scripts/#route-filters","title":"Route filters","text":"

    The lua scripts can be added to a route description with the lua() filter, the first parameter for the filter is the script. This can be either a file name (ending with .lua) or inline code, e.g. as

    • file lua(\"/path/to/file.lua\") - if a file path is not absolute, the path is relative to skipper\u2019s working directory.
    • inline lua(\"function request(c, p); print(c.request.url); end\")

    Any other additional parameters for the filter will be passed as a second table parameter to the called functions.

    Any parameter starting with \u201clua-\u201d should not be used to pass values for the script - those will be used for configuring the filter.

    "},{"location":"reference/scripts/#script-requirements","title":"Script requirements","text":"

    A filter script needs at least one global function: request or response. If present, they are called with a skipper filter context and the params passed in the route as table like

    -- route looks like\n--\n-- any: * -> lua(\"./test.lua\", \"myparam=foo\", \"other=bar\", \"justkey\") -> <shunt>\n--\nfunction request(ctx, params)\n    print(params[1])      -- myparam=foo\n    print(params[2])      -- other=bar\n    print(params[3])      -- justkey\n    print(params[4])      -- nil\n    print(params.myparam) -- foo\n    print(params.other)   -- bar\n    print(params.justkey) -- (empty string)\n    print(params.x)       -- nil\nend\n

    Parameter table allows index access as well as key-value access

    "},{"location":"reference/scripts/#print-builtin","title":"print builtin","text":"

    Lua print builtin function writes skipper info log messages.

    "},{"location":"reference/scripts/#sleep","title":"sleep","text":"

    sleep(number) function pauses execution for at least number milliseconds. A negative or zero duration causes sleep to return immediately.

    "},{"location":"reference/scripts/#enable-and-disable-lua-sources","title":"Enable and Disable lua sources","text":"

    The flag -lua-sources allows to set 5 different values:

    • \u201cfile\u201d: Allows to use reference to file for scripts
    • \u201cinline\u201d: Allows to use inline scripts
    • \u201cinline\u201d, \u201cfile\u201d: Allows to use reference to file and inline scripts
    • \u201cnone\u201d: Disable Lua filters
    • \u201d\u201c: the same as \u201cinline\u201d, \u201cfile\u201d, the default value for binary and library users
    "},{"location":"reference/scripts/#available-lua-modules","title":"Available lua modules","text":"

    Besides the standard modules - except for debug - the following additional modules have been preloaded and can be used with e.g. local http = require(\"http\"), see also the examples below

    • http gluahttp - TODO: configurable with something different than &http.Client{}
    • url gluaurl
    • json gopher-json
    • base64 lua base64

    For differences between the standard modules and the gopher-lua implementation check the gopher-lua documentation.

    Any other module can be loaded in non-byte code form from the lua path (by default for require(\"mod\") this is ./mod.lua, /usr/local/share/lua/5.1/mod.lua and /usr/local/share/lua/5.1/mod/init.lua).

    You may selectively enable standard and additional Lua modules using -lua-modules flag:

    -lua-modules=package,base,json\n
    Note that the preloaded additional modules require the package module.

    For standard modules you may enable only a subset of module symbols:

    -lua-modules=base.print,base.assert\n

    Use none to disable all modules:

    -lua-modules=none\n

    See also http://lua-users.org/wiki/SandBoxes

    "},{"location":"reference/scripts/#lua-states","title":"Lua states","text":"

    There is no guarantee that the request() and response() functions of a lua script run in the same lua state during one request. Setting a variable in the request and accessing it in the response will most likely fail and lead to hard debuggable errors. Use the ctx.state_bag to propagate values from request to response - and any other filter in the chain.

    "},{"location":"reference/scripts/#request-and-response","title":"Request and response","text":"

    The request() function is run for an incoming request and response() for backend response.

    "},{"location":"reference/scripts/#headers","title":"Headers","text":"

    Request headers can be accessed via ctx.request.header table like

    ua = ctx.request.header[\"user-agent\"]\n
    and iterated like
    for k, v in ctx.request.header() do\n    print(k, \"=\", v);\nend\n

    Header table is a functable that returns iterator

    Header names are normalized by the net/http go module like usual. Setting a header is done by assigning to the header table. Setting a header to nil or an empty string deletes the header - setting to nil is preferred.

    ctx.request.header[\"user-agent\"] = \"skipper.lua/0.0.1\"\nctx.request.header[\"Authorization\"] = nil -- delete authorization header\n

    header table returns empty string for missing keys

    Response headers ctx.response.header work the same way - this is of course only valid in the response() phase.

    "},{"location":"reference/scripts/#multiple-header-values","title":"Multiple header values","text":"

    Request and response header tables provide access to a first value of a header.

    To access multiple values use add and values methods:

    function request(ctx, params)\n    ctx.request.header.add(\"X-Foo\", \"Bar\")\n    ctx.request.header.add(\"X-Foo\", \"Baz\")\n\n    -- all X-Foo values\n    for _, v in pairs(ctx.request.header.values(\"X-Foo\")) do\n        print(v)\n    end\n\n    -- all values\n    for k, _ in ctx.request.header() do\n        for _, v in pairs(ctx.request.header.values(k)) do\n            print(k, \"=\", v)\n        end\n    end\nend\n
    "},{"location":"reference/scripts/#other-request-fields","title":"Other request fields","text":"
    • backend_url - (read only) returns the backend url specified in the route or an empty value in case it\u2019s a shunt or loopback
    • host - (read only) the \u2018Host\u2019 header that was in the incoming request to the proxy
    • outgoing_host - (read/write) the host that will be set for the outgoing proxy request as the \u2018Host\u2019 header.
    • remote_addr - (read only) the remote host, usually IP:port
    • content_length - (read only) content length
    • proto - (read only) something like \u201cHTTP/1.1\u201d
    • method - (read only) request method, e.g. \u201cGET\u201d or \u201cPOST\u201d
    • url - (read/write) request URL as string
    • url_path - (read/write) request URL path as string
    • url_query - (read/write) request URL query parameter table, similar to header table but returns nil for missing keys
    • url_raw_query - (read/write) encoded request URL query values, without \u2018?\u2019 as string
    • cookie - (read only) request cookie table, similar to header table but returns nil for missing keys
    "},{"location":"reference/scripts/#other-response-fields","title":"Other response fields","text":"
    • status_code - (read/write) response status code as number, e.g. 200
    "},{"location":"reference/scripts/#serving-requests-from-lua","title":"Serving requests from lua","text":"

    Requests can be served with ctx.serve(table), you must return after this call. Possible keys for the table:

    • status_code (number) - required (but currently not enforced)
    • header (table)
    • body (string)

    See also redirect and internal server error examples below

    "},{"location":"reference/scripts/#path-parameters","title":"Path parameters","text":"

    Path parameters (if any) can be read via ctx.path_param table

    Path(\"/api/:id\") -> lua(\"function request(ctx, params); print(ctx.path_param.id); end\") -> <shunt>\n

    path_param table returns nil for missing keys

    "},{"location":"reference/scripts/#statebag","title":"StateBag","text":"

    The state bag can be used to pass string, number and table values from one filter to another in the same chain. It is shared by all filters in one request (lua table values are only available to lua filters).

    function request(ctx, params)\n    -- the value of \"mykey\" will be available to all filters in the chain now:\n    ctx.state_bag[\"mykey\"] = \"foo\"\nend\n\nfunction response(ctx, params)\n    print(ctx.state_bag[\"mykey\"])\nend\n

    state_bag table returns nil for missing keys

    "},{"location":"reference/scripts/#examples","title":"Examples","text":"

    The examples serve as examples. If there is a go based plugin available, use that instead. For overhead estimate see benchmark.

    "},{"location":"reference/scripts/#oauth2-token-as-basic-auth-password","title":"OAuth2 token as basic auth password","text":"
    local base64 = require(\"base64\")\n\nfunction request(ctx, params)\n    token = string.gsub(ctx.request.header[\"Authorization\"], \"^%s*[Bb]earer%s+\", \"\", 1)\n    user = ctx.request.header[\"x-username\"]\n    if user == \"\" then\n        user = params.username\n    end\n    ctx.request.header[\"Authorization\"] = \"Basic \" .. base64.encode(user .. \":\"  .. token)\n    -- print(ctx.request.header[\"Authorization\"])\nend\n
    "},{"location":"reference/scripts/#validate-token","title":"validate token","text":"
    local http = require(\"http\")\nfunction request(ctx, params)\n    token = string.gsub(ctx.request.header[\"Authorization\"], \"^%s*[Bb]earer%s+\", \"\", 1)\n    if token == \"\" then\n        ctx.serve({status_code=401, body=\"Missing Token\"})\n        return\n    end\n\n    res, err = http.get(\"https://auth.example.com/oauth2/tokeninfo?access_token=\"..token)\n    if err ~= nil then\n        print(\"Failed to get tokeninfo: \" .. err)\n        ctx.serve({status_code=401, body=\"Failed to validate token: \"..err})\n        return\n    end\n    if res.status_code ~= 200 then\n        ctx.serve({status_code=401, body=\"Invalid token\"})\n        return\n    end\nend\n
    "},{"location":"reference/scripts/#strip-query","title":"strip query","text":"
    function request(ctx, params)\n    ctx.request.url = string.gsub(ctx.request.url, \"%?.*$\", \"\")\n    -- print(\"URL=\"..ctx.request.url)\nend\n
    "},{"location":"reference/scripts/#redirect","title":"redirect","text":"
    function request(ctx, params)\n    ctx.serve({\n        status_code=302,\n        header={\n            location=\"http://www.example.org/\",\n        },\n    })\nend\n
    "},{"location":"reference/scripts/#internal-server-error","title":"internal server error","text":"
    function request(ctx, params)\n    -- let 10% of all requests fail with 500\n    if math.random() < 0.1 then\n        ctx.serve({\n            status_code=500,\n            body=\"Internal Server Error.\\n\",\n        })\n    end\nend\n
    "},{"location":"reference/scripts/#set-request-header-from-params","title":"set request header from params","text":"
    function request(ctx, params)\n    ctx.request.header[params[1]] = params[2]\n    if params[1]:lower() == \"host\" then\n        ctx.request.outgoing_host = params[2]\n    end\nend\n
    "},{"location":"reference/scripts/#benchmark","title":"Benchmark","text":""},{"location":"reference/scripts/#redirectto-vs-lua-redirect","title":"redirectTo vs lua redirect","text":"

    See skptesting/benchmark-lua.sh

    Route for \u201cskipper\u201d is * -> redirectTo(302, \"http://localhost:9980\") -> <shunt>, route for \u201clua\u201d is * -> lua(\"function request(c,p); c.serve({status_code=302, header={location='http://localhost:9980'}});end\") -> <shunt>

    Benchmark results

    [benchmarking skipper-redirectTo]\nRunning 12s test @ http://127.0.0.1:9990/lorem.html\n  2 threads and 128 connections\n  Thread Stats   Avg      Stdev     Max   +/- Stdev\n    Latency     4.19ms    5.38ms  69.50ms   85.10%\n    Req/Sec    26.16k     2.63k   33.22k    64.58%\n  Latency Distribution\n     50%    1.85ms\n     75%    6.38ms\n     90%   11.66ms\n     99%   23.34ms\n  626122 requests in 12.04s, 91.36MB read\nRequests/sec:  51996.22\nTransfer/sec:      7.59MB\n[benchmarking skipper-redirectTo done]\n\n[benchmarking redirect-lua]\nRunning 12s test @ http://127.0.0.1:9991/lorem.html\n  2 threads and 128 connections\n  Thread Stats   Avg      Stdev     Max   +/- Stdev\n    Latency     6.81ms    9.69ms 122.19ms   85.95%\n    Req/Sec    21.17k     2.83k   30.63k    73.75%\n  Latency Distribution\n     50%    2.21ms\n     75%   10.22ms\n     90%   19.88ms\n     99%   42.54ms\n  507434 requests in 12.06s, 68.72MB read\nRequests/sec:  42064.69\nTransfer/sec:      5.70MB\n[benchmarking redirect-lua done]\n
    show lua performance is ~80% of native.

    The benchmark was run with the default pool size of script.InitialPoolSize = 3; script.MaxPoolSize = 10. With script.InitialPoolSize = 128; script.MaxPoolSize = 128 (tweaked for this benchmark) you get >95% of native performance in lua:

    [benchmarking skipper-redirectTo]\nRunning 12s test @ http://127.0.0.1:9990/lorem.html\n  2 threads and 128 connections\n  Thread Stats   Avg      Stdev     Max   +/- Stdev\n    Latency     4.15ms    5.24ms  62.27ms   84.88%\n    Req/Sec    25.81k     2.64k   32.74k    70.00%\n  Latency Distribution\n     50%    1.88ms\n     75%    6.49ms\n     90%   11.43ms\n     99%   22.49ms\n  617499 requests in 12.03s, 90.10MB read\nRequests/sec:  51336.87\nTransfer/sec:      7.49MB\n[benchmarking skipper-redirectTo done]\n\n[benchmarking redirect-lua]\nRunning 12s test @ http://127.0.0.1:9991/lorem.html\n  2 threads and 128 connections\n  Thread Stats   Avg      Stdev     Max   +/- Stdev\n    Latency     3.79ms    4.98ms  91.19ms   87.15%\n    Req/Sec    25.14k     4.71k   51.45k    72.38%\n  Latency Distribution\n     50%    1.61ms\n     75%    5.17ms\n     90%   10.05ms\n     99%   21.83ms\n  602630 requests in 12.10s, 81.61MB read\nRequests/sec:  49811.24\nTransfer/sec:      6.75MB\n[benchmarking redirect-lua done]\n

    Similar results are achieved when testing stripQuery() vs the lua version from above.

    "},{"location":"tutorials/auth/","title":"Authentication and Authorization","text":""},{"location":"tutorials/auth/#basic-auth","title":"Basic auth","text":"

    Basic Auth is defined in RFC7617.

    Install the htpasswd command line tool; we assume a Debian based system. Please refer to the documentation of your Operating System or package management vendor for how to install htpasswd:

    apt-get install apache2-utils\n

    Create a htpasswd file foo.passwd and use captain with password apassword:

    htpasswd -bcB foo.passwd captain apassword\n

    Start skipper with a basicAuth filter referencing the just created htpasswd file:

    ./bin/skipper -address :8080 -inline-routes 'r: * -> basicAuth(\"foo.passwd\") -> status(200) -> <shunt>'\n

    A client request without login credentials or wrong credentials:

    % curl localhost:8080/ -v\n*   Trying ::1...\n* Connected to localhost (::1) port 8080 (#0)\n> GET / HTTP/1.1\n> Host: localhost:8080\n> User-Agent: curl/7.49.0\n> Accept: */*\n>\n< HTTP/1.1 401 Unauthorized\n< Server: Skipper\n< Www-Authenticate: Basic realm=\"Basic Realm\"\n< Date: Thu, 01 Nov 2018 21:27:18 GMT\n< Content-Length: 0\n<\n* Connection #0 to host localhost left intact\n

    A client request with the correct credentials:

    % curl captain:apassword@localhost:8080/ -v\n*   Trying ::1...\n* Connected to localhost (::1) port 8080 (#0)\n* Server auth using Basic with user 'captain'\n> GET / HTTP/1.1\n> Host: localhost:8080\n> Authorization: Basic Y2FwdGFpbjphcGFzc3dvcmQ=\n> User-Agent: curl/7.49.0\n> Accept: */*\n>\n< HTTP/1.1 200 OK\n< Server: Skipper\n< Date: Thu, 01 Nov 2018 21:29:21 GMT\n< Content-Length: 0\n<\n* Connection #0 to host localhost left intact\n
    "},{"location":"tutorials/auth/#token-service-to-service","title":"Token service-to-service","text":"

    Service to service authentication and authorization is often done by using the HTTP Authorization header with the content prefix \u201cBearer \u201d, for example \u201cAuthorization: Bearer mytoken\u201d.

    Supported token formats

    • OAuth2 access tokens
    • JWT
    "},{"location":"tutorials/auth/#tokeninfo","title":"Tokeninfo","text":"

    Tokeninfo is a common, but not specified protocol, only supporting Bearer tokens in the Authorization header.

    In most cases you would have to have your own OAuth2 token infrastructure, that can return JWT or OAuth2 access tokens to authenticated parties and validate tokens with their custom tokeninfo endpoint. In case of JWT the access token is signed and can be validated without a central tokeninfo endpoint.

    Example route:

    all: Path(\"/\")\n     -> oauthTokeninfoAnyScope(\"read-X\", \"readwrite-X\")\n     -> \"http://localhost:9090/\"\n

    The access token should be passed from the client as Bearer token in the Authorization header. Skipper will send this token unchanged as Bearer token in the Authorization header to the Tokeninfo endpoint. The request flow with a Tokeninfo setup is shown in the following picture:

    "},{"location":"tutorials/auth/#tokenintrospection-rfc7662","title":"Tokenintrospection RFC7662","text":"

    Tokenintrospection service to service authentication and authorization is specified by RFC7662. Skipper uses RFC Draft for discovering token infrastructure configuration, to find the introspection_endpoint.

    Example route:

    all: *\n        -> oauthTokenintrospectionAnyKV(\"https://identity.example.com/managed-id\", \"jdoe\")\n        -> \"http://localhost:9090/\";\n

    The access token should be passed from the client as Bearer token in the Authorization header. Skipper will send this token as defined in RFC7662 in a POST request \u201capplication/x-www-form-urlencoded\u201d as value for key token to the Tokenintrospection endpoint. The request flow with Tokenintrospection setup is shown in the following picture:

    "},{"location":"tutorials/auth/#openid-connect","title":"OpenID Connect","text":"

    OpenID Connect is an OAuth2.0 based authentication and authorization mechanism supported by several providers. Skipper can act as a proxy for backend server which requires authenticated clients. Skipper handles the authentication with the provider and upon successful completion of authentication passes subsequent requests to the backend server.

    Skipper\u2019s implementation of OpenID Connect Client works as follows:

    1. Filter is initialized with the following parameters:
      1. Secrets file with keys used for encrypting the token in a cookie and also for generating shared secret.
      2. OpenID Connect Provider URL
      3. The Client ID
      4. The Client Secret
      5. The Callback URL for the client when a user successfully authenticates and is returned.
      6. The Scopes to be requested along with the openid scope
      7. The claims that should be present in the token or the fields needed in the user information.
    2. The user makes a request to a backend which is covered by an OpenID filter.
    3. Skipper checks if a cookie is set with any previous successfully completed OpenID authentication.
    4. If the cookie is valid then Skipper passes the request to the backend.
    5. If the cookie is not valid then Skipper redirects the user to the OpenID provider with its Client ID and a callback URL.
    6. When the user successfully completes authentication the provider redirects the user to the callback URL with a token.
    7. Skipper receives this token and makes a backend channel call to get an ID token and other required information.
    8. If all the user information/claims are present then it encrypts this and sets a cookie which is encrypted and redirects the user to the originally requested URL.

    To use OpenID, define a filter for a backend which needs to be covered by OpenID Connect authentication.

    oauthOidcAllClaims(\"https://accounts.identity-provider.com\", \"some-client-id\",\n    \"some-client-secret\", \"http://callback.com/auth/provider/callback\", \"scope1 scope2\",\n    \"claim1 claim2\") -> \"https://internal.example.org\";\n

    Here scope1 scope2 are the scopes that should be included when requesting authentication from the OpenID provider. Any number of scopes can be specified here. The openid scope is added automatically by the filter. The other fields which need to be specified are the URL of the provider which in the above example is https://accounts.identity-provider.com. The client ID and the client secret. The callback URL which is specified while generating the client id and client secret. Then the scopes and finally the claims which should be present along with the return id token.

    oauthOidcUserInfo(\"https://oidc-provider.example.com\", \"client_id\", \"client_secret\",\n    \"http://target.example.com/subpath/callback\", \"email profile\",\n    \"name email picture\") -> \"https://internal.example.org\";\n

    This filter is similar but it verifies that the token has certain user information fields accessible with the token returned by the provider. The fields can be specified at the end like in the example above where the fields name, email and picture are requested.

    Upon successful authentication Skipper will start allowing the user requests through to the backend. Along with the original request to the backend Skipper will include information which it obtained from the provider. The information is in JSON format with the header name Skipper-Oidc-Info. In the case of the claims container the header value is in the format.

    {\n    \"oauth2token\": \"xxx\",\n    \"claims\": {\n        \"claim1\": \"val1\",\n        \"claim2\": \"val2\"\n    },\n    \"subject\": \"subj\"\n}\n

    In the case of a user info filter the payload is in the format:

    {\n    \"oauth2token\": \"xxx\",\n    \"userInfo\": {\n        \"sub\": \"sub\",\n        \"profile\": \"prof\",\n        \"email\": \"abc@example.com\",\n        \"email_verified\": \"abc@example.com\"\n    },\n    \"subject\": \"subj\"\n}\n

    Skipper encrypts the cookies and also generates a nonce during the OAuth2.0 flow for which it needs a secret key. This key is in a file which can be rotated periodically because it is reread by Skipper. The path to this file must be passed with the flag -oidc-secrets-file when Skipper is started.

    "},{"location":"tutorials/auth/#authz-and-access-control","title":"AuthZ and access control","text":"

    Authorization validation and access control is available by means of a subsequent filter oidcClaimsQuery. It inspects the ID token, which exists after a successful oauthOidc* filter step, and validates the defined query with the request path.

    Given following example ID token:

    {\n  \"email\": \"someone@example.org\",\n  \"groups\": [\n    \"CD-xyz\",\n    \"appX-Tester\"\n  ],\n  \"name\": \"Some One\"\n}\n

    Access to path / would be granted to everyone in example.org, however path /login only to those being member of group \"appX-Tester\":

    oauthOidcAnyClaims(...) -> oidcClaimsQuery(\"/login:groups.#[==\\\"appX-Tester\\\"]\", \"/:@_:email%\\\"*@example.org\\\"\")\n
    "},{"location":"tutorials/auth/#oauth2-authorization-grant-flow","title":"OAuth2 authorization grant flow","text":"

    Authorization grant flow is a mechanism to coordinate between a user-agent, a client, and an authorization server to obtain an OAuth2 access token for a user. Skipper supports the flow with the oauthGrant() filter. It works as follows:

    1. A user makes a request to a route with oauthGrant().
    2. The filter checks whether the request has a cookie called oauth-grant1. If it does not, or if the cookie and its tokens are invalid, it redirects the user to the OAuth2 provider\u2019s authorization endpoint2.
    3. The user logs into the external OAuth2 provider, e.g. by providing a username and password.
    4. The provider redirects the user back to Skipper with an authorization code, using the redirect_uri URL parameter which was part of the previous redirect2. The callback route must have a grantCallback() filter defined. Skipper automatically adds this callback route for you when the OAuth2 authorization grant flow feature is enabled. Note that the automatically added callback route does not apply default filters. If you need default filters to be applied to the callback route as well, please register the route manually in your routes files.
    5. Skipper calls the provider\u2019s token URL with the authorization code, and receives a response with the access and refresh tokens.
    6. Skipper stores the tokens in an oauth-grant1 cookie which is stored in the user\u2019s browser.
    7. Subsequent calls to any route with an oauthGrant() filter will now pass as long as the access token is valid.

    1 The name of this cookie can be changed by providing the -oauth2-token-cookie-name parameter.

    2 The value of redirect_uri parameter of the authorization flow could be set by providing -oauth2-auth-url-parameters=redirect_uri=https://example.org/oauth-callback. If not set Skipper will automatically determine it based on the initial request hostname and -oauth2-callback-path flag value.

    Please note that it is not currently possible to use multiple OAuth2 providers with Skipper.

    "},{"location":"tutorials/auth/#encrypted-cookie-tokens","title":"Encrypted cookie tokens","text":"

    The cookie set by the oauthGrant() filter contains the OAuth2 access and refresh tokens in encrypted form. This means Skipper does not need to persist any session information about users, while also not exposing the tokens to users.

    "},{"location":"tutorials/auth/#token-refresh","title":"Token refresh","text":"

    The oauthGrant() filter also supports token refreshing. Once the access token expires and the user makes another request, the filter automatically refreshes the token and sets the updated cookie in the response.

    "},{"location":"tutorials/auth/#instructions","title":"Instructions","text":"

    To use authorization grant flow, you need to:

    1. Configure OAuth2 credentials.
    2. Configure the grant filters with OAuth2 URLs.
    3. Add the OAuth2 grant filters to routes.
    "},{"location":"tutorials/auth/#configure-oauth2-credentials","title":"Configure OAuth2 credentials","text":"

    Before you start, you need to register your application with the OAuth2 provider. If your provider asks you for the callback URL, provide the URL that you set as the -oauth2-callback-path parameter. If you did not provide a value, use the default route: /.well-known/oauth2-callback.

    Skipper must be configured with the following credentials and secrets:

    1. OAuth2 client ID for authenticating with the OAuth2 provider.
    2. OAuth2 client secret for authenticating with the OAuth2 provider.
    3. Cookie encryption secret for encrypting and decrypting token cookies.

    You can load all of these secrets from separate files, in which case they get automatically reloaded to support secret rotation. You can provide the paths to the files containing each secret as follows:

    skipper -oauth2-client-id-file=/path/to/client_id \\\n    -oauth2-client-secret-file=/path/to/client_secret \\\n    -oauth2-secret-file=/path/to/cookie_encryption_secret \\\n    -credentials-update-interval=30s\n

    Paths may contain {host} placeholder which will be replaced by the request host. This is used to define separate credentials for different hosts.

    Care must be taken when used in conjunction with -credentials-paths option because files from -credentials-paths are available to bearerinjector filter. That is -credentials-paths=/path/to in above example will expose grant files to bearerinjector filter.

    You can modify the secret update interval using the -credentials-update-interval argument. In example above, the interval is configured to reload the secrets from the files every 30 seconds.

    If you prefer, you can provide the client ID and secret values directly as arguments to Skipper instead of loading them from files. In that case, call Skipper with:

    skipper -oauth2-client-id=<CLIENT_ID> -oauth2-client-secret=<CLIENT_SECRET>\n
    "},{"location":"tutorials/auth/#configure-the-grant-filters","title":"Configure the grant filters","text":"

    The grant filters need to be enabled and configured with your OAuth2 provider\u2019s authorization, token, and tokeninfo endpoints. This can be achieved by providing Skipper with the following arguments:

    skipper -enable-oauth2-grant-flow \\\n    -oauth2-auth-url=<OAUTH2_AUTHORIZE_ENDPOINT> \\\n    -oauth2-token-url=<OAUTH2_TOKEN_ENDPOINT> \\\n    -oauth2-revoke-token-url=<OAUTH2_REVOKE_TOKEN_ENDPOINT> \\\n    -oauth2-tokeninfo-url=<OAUTH2_TOKENINFO_ENDPOINT> \\\n    -oauth2-callback-path=/oauth/callback\n

    The -oauth2-revoke-token-url is optional, and should be supplied if you plan to use the grantLogout filter to revoke tokens.

    You can configure the oauthGrant() filter further for your needs. See the oauthGrant filter reference for more details.

    "},{"location":"tutorials/auth/#add-filters-to-your-routes","title":"Add filters to your routes","text":"

    You can protect any number of routes with the oauthGrant() filter. Unauthenticated users will be refused access and redirected to log in.

    Skipper will automatically add a callback route for you with the grantCallback filter registered on it. The path for this route can be configured with the -oauth2-callback-path parameter. If the parameter is not given, it will be /.well-known/oauth2-callback

    You can optionally add a grantLogout() filter to delete token cookie. If -oauth2-revoke-token-url is set it will revoke access and refresh tokens:

    foo:\n    Path(\"/foo\")\n    -> oauthGrant()\n    -> \"http://localhost:9090\";\n\nlogout:\n    Path(\"/logout\")\n    -> grantLogout()\n    -> redirectTo(302)\n    -> <shunt>;\n
    "},{"location":"tutorials/auth/#optional-authz-and-access-control","title":"(Optional) AuthZ and access control","text":"

    You can add a grantClaimsQuery filter after a oauthGrant to control access based on any OAuth2 claim. A claim is any property returned by the tokeninfo endpoint. The filter works exactly like the oidcClaimsQuery filter (it is actually just an alias for it).

    For example, if your tokeninfo endpoint returns the following JSON:

    {\n    \"scope\": [\"email\"],\n    \"username\": \"foo\"\n}\n

    you could limit the access to a given route only to users that have the email scope by doing the following:

    1. Append a grantClaimsQuery filter to the oauthGrant filter with the following query:
      -> oauthGrant() -> grantClaimsQuery(\"/path:scope.#[==\\\"email\\\"]\")\n
    2. Provide the name of the claim that corresponds to the OAuth2 subject in the tokeninfo payload as an argument to Skipper:
      skipper -oauth2-tokeninfo-subject-key=username\n

    The subject is the field that identifies the user and is often called sub, especially in the context of OpenID Connect. In the example above, it is username.

    "},{"location":"tutorials/auth/#open-policy-agent","title":"Open Policy Agent","text":"

    To enable Open Policy Agent filter, use the -enable-open-policy-agent command line flag.

    Open Policy Agent is integrated as a Go library so no extra setup is needed to run. Every filter creates a virtual OPA instance in memory that is configured using a configuration file in the same configuration format that a standalone OPA would use. To allow for configurability, the configuration file is interpolated using Go Templates to allow every virtual instance to pull different bundles. This template file is passed using the -open-policy-agent-config-template flag.

    "},{"location":"tutorials/auth/#configuration-file","title":"Configuration File","text":"

    As an example the following initial config can be used

    services:\n  - name: bundle-service\n    url: https://my-example-opa-bucket.s3.eu-central-1.amazonaws.com\n    credentials:\n      s3_signing:\n        environment_credentials: {}\nlabels:\n  environment: production\ndiscovery:\n  name: discovery\n  prefix: \"/applications/{{ .bundlename }}\"\n

    The variable .bundlename is the first argument in the following filters and can be in any format that OPA can understand, so for example application IDs from a registry, uuids, \u2026

    "},{"location":"tutorials/auth/#input-structures","title":"Input Structures","text":"

    Input structures to policies follow those that are used by the opa-envoy-plugin, the existing examples and documentation apply also to Skipper. Please note that the filters in Skipper always generate v3 input structures.

    "},{"location":"tutorials/auth/#passing-context-to-the-policy","title":"Passing context to the policy","text":"

    Generally there are two ways to pass context to a policy:

    1. as part of the labels in Open Policy Agent (configured in the configuration file, see below) that should be used for deployment level taxonomy,
    2. as part of so called context extensions that are part of the Envoy external auth specification.

    This context can be passed as second argument to filters:

    opaAuthorizeRequest(\"my-app-id\", \"com.mycompany.myprop: myvalue\") or opaAuthorizeRequest(\"my-app-id\", \"{'com.mycompany.myprop': 'my value'}\")

    The second argument is parsed as YAML, cannot be nested and values need to be strings.

    In Rego this can be used like this input.attributes.contextExtensions[\"com.mycompany.myprop\"] == \"my value\"

    "},{"location":"tutorials/auth/#quick-start-rego-playground","title":"Quick Start Rego Playground","text":"

    A quick way without setting up Backend APIs is to use the Rego Playground.

    To get started pick from examples Envoy > Hello World. Click on \u201cPublish\u201d and note the random ID in the section \u201cRun OPA with playground policy\u201d.

    Place the following file in your local directory with the name opaconfig.yaml

    bundles:\n  play:\n    resource: bundles/{{ .bundlename }}\n    polling:\n      long_polling_timeout_seconds: 45\nservices:\n  - name: play\n    url: https://play.openpolicyagent.org\nplugins:\n  envoy_ext_authz_grpc:\n    # This needs to match the package, defaulting to envoy/authz/allow\n    path: envoy/http/public/allow\n    dry-run: false\ndecision_logs:\n  console: true\n

    Start Skipper with

    skipper -enable-open-policy-agent -open-policy-agent-config-template opaconfig.yaml \\\n  -inline-routes 'notfound: * -> opaAuthorizeRequest(\"<playground-bundle-id>\") -> inlineContent(\"<h1>Authorized Hello</h1>\") -> <shunt>'\n

    You can test the policy with

    • Authorized: curl http://localhost:9090/ -i
    • Authorized: curl http://localhost:9090/foobar -H \"Authorization: Basic charlie\" -i
    • Forbidden: curl http://localhost:9090/foobar -i
    "},{"location":"tutorials/basics/","title":"Basics","text":""},{"location":"tutorials/basics/#architecture","title":"Architecture","text":"

    The core business of skipper is routing based on HTTP. It performs and scales well, for example it handles more than 800000 routes in production with 60000 requests per second.

    Skipper is written as a library and is also a multi binary project with 2 binaries, named skipper and eskip. Skipper is the HTTP proxy and eskip is a CLI application to verify, print, update or delete Skipper routes.

    Skipper\u2019s internal architecture is split into different packages. The skipper package has connections to multiple dataclient, that pull information from different sources, for example local routes from an eskip file or dynamic routes from Kubernetes ingress objects.

    The proxy package gets the routes populated by skipper and always has a current routing table which will be replaced on change.

    A route is one entry in the routing table. A route consists of one or more predicate, that are used to find a route for a given HTTP request. A route can also have one or more filter, that can modify the content of the request or response. A route can point to a backend, it can be a <shunt>, meaning that skipper serves the requests for the route, a <loopback>, meaning that the requests will be matched against the routing table again after filters have modified them, or a <dynamic>, meaning that the target backend must be set in a filter.

    Opentracing API is supported via tracers and you can find all of them in ./tracing/tracers/. For example Jaeger is supported.

    Skipper has a rich set of metrics that are exposed as json, but can also be exported in Prometheus format.

    "},{"location":"tutorials/basics/#concepts","title":"Concepts","text":""},{"location":"tutorials/basics/#route-definition","title":"Route definition","text":"

    A route consists of an ID, predicates, filters and a backend and is most often written in eskip syntax.

    Syntax:

    ID:\n        Predicate1() && .. && PredicateN()\n        -> filter1()\n        ...\n        -> filterN()\n        -> BACKEND\n

    An example routing configuration:

    baidu:\n        Path(\"/baidu\")\n        -> setRequestHeader(\"Host\", \"www.baidu.com\")\n        -> setPath(\"/s\")\n        -> setQuery(\"wd\", \"godoc skipper\")\n        -> \"http://www.baidu.com\";\ngoogle:\n        *\n        -> setPath(\"/search\")\n        -> setQuery(\"q\", \"godoc skipper\")\n        -> \"https://www.google.com\";\nyandex:\n        * && Cookie(\"yandex\", \"true\")\n        -> setPath(\"/search/\")\n        -> setQuery(\"text\", \"godoc skipper\")\n        -> tee(\"http://127.0.0.1:12345/\")\n        -> \"https://yandex.ru\";\n
    "},{"location":"tutorials/basics/#predicate","title":"Predicate","text":"

    A Predicate adds a matching rule to a route. For example the Cookie predicate, Cookie(\"yandex\", \"true\"), matches if there is a cookie in the request with name \u201cyandex\u201d and the value is \u201ctrue\u201d, else the route processing will go on and try to find another matching route for the given request. Multiple predicates can be combined by && which means a logical AND. If you need a logical OR, you have to create another route.

    Special Predicates:

    • * catch all is always true
    • Path() reduces the number of routes in O(log n) time to scan afterwards a subset in linear time
    • PathSubtree() reduces the number of routes in O(log n) time to scan afterwards a subset in linear time
    "},{"location":"tutorials/basics/#predicate-and-routing-table","title":"Predicate and routing table","text":"

    A routing table consists of a number of routes. A route has a list of predicates and filters. Predicates match an incoming request to a specific, best matching, route. Each route has a set of filters.

    "},{"location":"tutorials/basics/#filter","title":"Filter","text":"

    A filter changes a HTTP request or response or both. Multiple filters can be concatenated by ->.

    Some special filters are:

    • inlineContent() sets the HTTP response body, should be used with status() filter and backend
    • static() serves static files and should be used with backend
    • status() sets HTTP status code to a given value, should be used with backend
    • tee() clones request to given target
    • "},{"location":"tutorials/basics/#filter-in-context-of-an-http-request","title":"Filter in context of an HTTP request","text":"

      The picture shows the transformation of the requests and responses

      "},{"location":"tutorials/basics/#backend","title":"Backend","text":"

      The last entry of a route is the backend definition, that will be called with the result request after filter processing. Normally this is an URL string.

      Special backends:

      • <loopback> restart route processing with the possibly changed request
      • <shunt> stops processing, used for fast returns
      • <dynamic> target is set dynamically in a filter
      • <$algorithm, \"be1\", \"be2\", ..., \"beN\"> load balanced backend with N backends

      See more about backends in backend references.

      "},{"location":"tutorials/basics/#dataclient","title":"Dataclient","text":"

      Dataclients are used to pull route information from a data source. The data will be used to create routes according to the dataclient. As a special case, for example kubernetes dataclient automatically adds HTTP->HTTPS redirects if skipper is started with -kubernetes-https-redirect.

      Dataclients:

      • eskip file
      • remote eskip
      • route string
      • kubernetes
      • etcd
      "},{"location":"tutorials/basics/#route-processing","title":"Route processing","text":"

      Package skipper has a Go http.Server and does the ListenAndServe call with the loggingHandler wrapped proxy. The loggingHandler is basically a middleware for the proxy providing access logs and both implement the plain Go http.Handler interface.

      For each incoming http.Request the proxy will create a request context and enhance it with an Opentracing API Span. It will check proxy global ratelimits first and after that lookup the route in the routing table. After that skipper will apply all request filters, that can modify the http.Request. It will then check the route local ratelimits, the circuitbreakers and do the backend call. If the backend call got a TCP or TLS connection error in a loadbalanced route, skipper will do a retry to another backend of that loadbalanced group automatically. Just before the response to the caller, skipper will process the response filters, that can change the http.Response.

      In two special cases, skipper doesn\u2019t forward the request to the backend. When the route is shunted (<shunt>), skipper serves the request alone, by using only the filters. When the route is a <loopback>, the request is passed to the routing table for finding another route, based on the changes that the filters made to the request. In case it will always find a <loopback> route it will stop after maximum number of loopbacks is reached and logs an error.

      "},{"location":"tutorials/basics/#routing-mechanism","title":"Routing mechanism","text":"

      The routing executes the following steps in the typical case:

      1. Select the best fitting route by matching the request against the predicates. When no route found, respond with 404 (unless the default status code is configured to a different value).

      2. Execute the filters defined in the route in normal order on the request. The filters may or may not alter the request.

      3. Forward the request to the backend defined by the route and receive a response.

      4. Execute the filters defined in the route in reverse order on the response. The filters may or may not alter the response.

      5. Respond to the incoming request with the resulting response.

      "},{"location":"tutorials/basics/#route-matching","title":"Route matching","text":"

      Skipper can handle a relatively large number of routes with acceptable performance, while being able to use any attribute of the incoming HTTP requests to distinguish between them. In order to be able to do so, the path matching predicates (Path() and PathSubtree() but not PathRegexp()) have a special role during route matching, which is a tradeoff by design, and needs to be kept in mind to understand in some cases why a certain route was matched for a request instead of another.

      The route matching logic can be summed up as follows:

      1. Lookup in the path tree based on the Path() and the PathSubtree() predicates, using the path component of the incoming request\u2019s URI. Then the remaining predicates of the found route(s) are evaluated.

        • the path lookup is a radix tree with O(log(n)) time complexity

        • in case of intersecting paths, the more specific path is matched in the tree

        • PathRegexp() is not used in the tree, but it is evaluated only after Path() or PathSubtree(), just like e.g. Method() or Host().

      2. If step #1 matches multiple routes, which means there are multiple routes in the same position of the path tree, and all other predicates match the request, too, then the route with the highest weight is matched.

        • this is an O(n) lookup, but only on the same leaf

        • the root of the tree is considered a single leaf, so if not using the Path() or PathSubtree() predicates, the entire lookup will become O(n) over all the routes.

      3. If #2 results in multiple matching routes, then one route will be selected. It is unspecified which one.

      See more details about the predicates here: Predicates.

      "},{"location":"tutorials/basics/#route-creation","title":"Route creation","text":"

      Skipper has two kind of routes:

      1. eskip.Route
      2. routing.Route

      An eskip.Route is the parsed representation of user input. This will be converted to a routing.Route, when the routing table is built. A tree of routing.Route will be used to match an incoming Request to a route.

      Route creation steps:

      1. Skipper\u2019s route creation starts with the Dataclient to fetch routes ([]*eskip.Route).
      2. These will be first processed by []routing.PreProcessor. PreProcessors are able to add, remove, modify all []*eskip.Route.
      3. After that []*eskip.Route are converted to []*routing.Route.
      4. []routing.PostProcessor are executed. PostProcessors are able to add, remove, modify all []*routing.Route.
      5. Last the active routing table is swapped. Now all incoming requests are handled by the new routing table
      "},{"location":"tutorials/basics/#building-skipper","title":"Building skipper","text":""},{"location":"tutorials/basics/#local-build","title":"Local build","text":"

      To get a local build of skipper for your CPU architecture, you can run make skipper. To cross compile to non Linux platforms you can use:

      • make build.darwin for Mac OS X (amd64)
      • make build.windows for Windows (amd64)

      The local build will write into ./bin/ directory.

      "},{"location":"tutorials/basics/#ci-build","title":"CI build","text":"

      The current used CI flow to build the official docker container, you can see in delivery.yaml. Official release versions you will find at registry.opensource.zalan.do/teapot/skipper:${RELEASE_VERSION}, where ${RELEASE_VERSION} is the git tag got by $(git describe --tags --always --dirty).

      Test versions are released at registry.opensource.zalan.do/teapot/skipper-test:${CDP_BUILD_VERSION} for every pull request, limited to only repository members, because of compliance and security reasons.

      "},{"location":"tutorials/basics/#testing-routes","title":"Testing routes","text":"

      To test routes you can use a local build of skipper and pass arguments -inline-routes=<route string> or for more complex ones use a local eskip file on disk and use -routes-file=<filepath>.

      Example:

      ./bin/skipper -address :9999 -inline-routes 'r: * -> setQuery(\"lang\", \"pt\") -> \"http://127.0.0.1:8080/\"'\n

      Now you have a proxy running that will set a query to your request URL and call http://127.0.0.1:8080/?lang=pt

      The simplest way of testing a proxy is using a local backend and a local browser.

      Local backend example:

      ./bin/skipper -address :8080 -inline-routes 'r: * -> inlineContent(\"Hello world!\") -> status(200) -> <shunt>'\n

      If you want to do the request and see the response in detail, you can use curl as a browser, which should be installed on most Linux and Mac OS X computers.

      Example client call to our defined proxy:

      % curl localhost:8080 -v\n* Rebuilt URL to: localhost:8080/\n*   Trying ::1...\n* Connected to localhost (::1) port 8080 (#0)\n> GET / HTTP/1.1\n> Host: localhost:8080\n> User-Agent: curl/7.49.0\n> Accept: */*\n>\n< HTTP/1.1 200 OK\n< Content-Length: 12\n< Content-Type: text/plain; charset=utf-8\n< Server: Skipper\n< Date: Thu, 01 Nov 2018 15:54:13 GMT\n<\n* Connection #0 to host localhost left intact\nHello world!\n
      "},{"location":"tutorials/basics/#yaml-configuration","title":"YAML Configuration","text":"

      The usage of flags to configure the skipper binary can get quickly out of hand. You can use a yaml file instead to populate the flags presented in the skipper -help command.

      kubernetes: true\nkubernetes-in-cluster: true\nkubernetes-https-redirect: true\nproxy-preserve-host: true\nserve-host-metrics: true\naddress: \":8080\"\nenable-ratelimits: true\nexperimental-upgrade: true\nmetrics-exp-decay-sample: true\nlb-healthcheck-interval: \"3s\"\nmetrics-flavour: [\"codahale\",\"prometheus\"]\nenable-connection-metrics: true\nwhitelisted-healthcheck-cidr: \"172.20.0.0/16\"\nignore-trailing-slash: true\ninline-routes: 'r: * -> inlineContent(\"Hello world!\") -> status(200) -> <shunt>'\n

      Considering that this file would be named config.yaml you can use it to populate the flags using the config-file flag:

      ./bin/skipper -config-file=config.yaml\n

      Performing the same call to the address as exemplified in the previous section should yield the same results.

      "},{"location":"tutorials/basics/#current-routing-table","title":"Current routing table","text":"

      To investigate the current routing table skipper has loaded into its memory, you can use the -support-listener, which defaults to port 9911 and you have to do a GET request to the /routes endpoint.

      Example:

      % curl localhost:9911/routes\nr: *\n  -> setQuery(\"lang\", \"pt\")\n  -> \"http://127.0.0.1:8000\";\n

      If you do not see your route, then you have most probably a syntax error in your route definition, such that the route was not loaded into memory.

      To print the number of routes, X-Count header, and the last update timestamp, X-Timestamp header, you can use a HEAD request to the support listener /routes endpoint:

      % curl -I localhost:9911/routes\nHTTP/1.1 200 OK\nContent-Type: text/plain\nX-Count: 1\nX-Timestamp: 1541086036\nDate: Fri, 02 Nov 2018 00:30:43 GMT\n

      For skipper operators the number of routes can be interesting for statistics and the timestamp to detect skipper instances that have not updated its routing table.

      If there are more than 1024 routes used, then paging the results is possible with the offset and limit query parameters:

      curl localhost:9911/routes?offset=2048&limit=512\n
      "},{"location":"tutorials/basics/#route-ids","title":"Route IDs","text":"

      In the following example rid is the route ID:

      % curl localhost:9911/routes\nrid: *\n  -> setQuery(\"lang\", \"pt\")\n  -> \"http://127.0.0.1:8000\";\n

      If the route ID has a prefix kube_, then it is a route created by the Kubernetes dataclient. We do not disallow that you create manually routes with kube_ prefix, but most of the time you should not use it in other routes to differentiate the routes created by other dataclients, in case you use multiple at the same time.

      "},{"location":"tutorials/built-your-own/","title":"Built your own skipper proxy","text":"

      One of the biggest advantages of skipper compared to other HTTP proxies is that skipper is a library first design. This means that it is common to build your custom proxy based on skipper.

      A minimal example project is skipper-example-proxy.

      /*\nThis command provides an executable version of skipper with the default\nset of filters.\n\nFor the list of command line options, run:\n\n    skipper -help\n\nFor details about the usage and extensibility of skipper, please see the\ndocumentation of the root skipper package.\n\nTo see which built-in filters are available, see the skipper/filters\npackage documentation.\n*/\npackage main\n\nimport (\n    log \"github.com/sirupsen/logrus\"\n    lfilters \"github.com/szuecs/skipper-example-proxy/filters\"\n    \"github.com/zalando/skipper\"\n    \"github.com/zalando/skipper/config\"\n)\n\nfunc main() {\n    cfg := config.NewConfig()\n    if err := cfg.Parse(); err != nil {\n        log.Fatalf(\"Error processing config: %s\", err)\n    }\n\n    log.SetLevel(cfg.ApplicationLogLevel)\n\n    opt := cfg.ToOptions()\n    opt.CustomFilters = append(opt.CustomFilters, lfilters.NewMyFilter())\n\n    log.Fatal(skipper.Run(opt))\n}\n
      "},{"location":"tutorials/built-your-own/#code","title":"Code","text":"

      Write the code and use the custom filter implemented in https://github.com/szuecs/skipper-example-proxy/blob/main/filters/custom.go

      [:~]% mkdir -p /tmp/go/skipper\n[:~]% cd /tmp/go/skipper\n[:/tmp/go/skipper]% go mod init myproject\ngo: creating new go.mod: module myproject\n[:/tmp/go/skipper]% cat >main.go\npackage main\n\nimport (\n    log \"github.com/sirupsen/logrus\"\n    lfilters \"github.com/szuecs/skipper-example-proxy/filters\"\n    \"github.com/zalando/skipper\"\n    \"github.com/zalando/skipper/config\"\n)\n\nfunc main() {\n    cfg := config.NewConfig()\n    if err := cfg.Parse(); err != nil {\n        log.Fatalf(\"Error processing config: %s\", err)\n    }\n\n    log.SetLevel(cfg.ApplicationLogLevel)\n\n    opt := cfg.ToOptions()\n    opt.CustomFilters = append(opt.CustomFilters, lfilters.NewMyFilter())\n\n    log.Fatal(skipper.Run(opt))\n}\nCTRL-D\n[:/tmp/go/skipper]%\n

      "},{"location":"tutorials/built-your-own/#build","title":"Build","text":"

      Fetch dependencies and build your skipper binary.

      [:/tmp/go/skipper]% go mod tidy\ngo: finding module for package github.com/zalando/skipper/config\ngo: finding module for package github.com/szuecs/skipper-example-proxy/filters\ngo: finding module for package github.com/sirupsen/logrus\ngo: finding module for package github.com/zalando/skipper\ngo: found github.com/sirupsen/logrus in github.com/sirupsen/logrus v1.9.3\ngo: found github.com/szuecs/skipper-example-proxy/filters in github.com/szuecs/skipper-example-proxy v0.0.0-20230622190245-63163cbaabc8\ngo: found github.com/zalando/skipper in github.com/zalando/skipper v0.16.117\ngo: found github.com/zalando/skipper/config in github.com/zalando/skipper v0.16.117\ngo: finding module for package github.com/nxadm/tail\ngo: finding module for package github.com/kr/text\ngo: finding module for package github.com/rogpeppe/go-internal/fmtsort\ngo: found github.com/kr/text in github.com/kr/text v0.2.0\ngo: found github.com/rogpeppe/go-internal/fmtsort in github.com/rogpeppe/go-internal v1.10.0\n...\n\n[:/tmp/go/skipper]% go build -o skipper .\n[:/tmp/go/skipper]%\n

      "},{"location":"tutorials/built-your-own/#test","title":"Test","text":"

      We start the proxy

      # start the proxy\n[:/tmp/go/skipper]% ./skipper -inline-routes='* -> myFilter() -> status(250) -> <shunt>'\n[APP]INFO[0000] Expose metrics in codahale format\n[APP]INFO[0000] enable swarm: false\n[APP]INFO[0000] Replacing tee filter specification\n[APP]INFO[0000] Replacing teenf filter specification\n[APP]INFO[0000] Replacing lua filter specification\n[APP]INFO[0000] support listener on :9911\n[APP]INFO[0000] Dataclients are updated once, first load complete\n[APP]INFO[0000] proxy listener on :9090\n[APP]INFO[0000] TLS settings not found, defaulting to HTTP\n[APP]INFO[0000] route settings, reset, route: : * -> myFilter() -> status(250) -> <shunt>\n[APP]INFO[0000] route settings received\n[APP]INFO[0000] route settings applied\n127.0.0.1 - - [22/Jun/2023:21:13:46 +0200] \"GET /foo HTTP/1.1\" 250 0 \"-\" \"curl/7.49.0\" 0 127.0.0.1:9090 - -\n

      Then we start the client to call the proxy endpoint.

      # client\n% curl -v http://127.0.0.1:9090/foo\n*   Trying 127.0.0.1...\n* Connected to 127.0.0.1 (127.0.0.1) port 9090 (#0)\n> GET /foo HTTP/1.1\n> Host: 127.0.0.1:9090\n> User-Agent: curl/7.49.0\n> Accept: */*\n>\n< HTTP/1.1 250 status code 250   <-- skipper core filter status(250)\n< My-Filter: response            <-- your custom filter myFilter()\n< Server: Skipper\n< Date: Thu, 22 Jun 2023 19:13:46 GMT\n< Transfer-Encoding: chunked\n<\n* Connection #0 to host 127.0.0.1 left intact\n

      "},{"location":"tutorials/common-use-cases/","title":"Common Use Cases","text":""},{"location":"tutorials/common-use-cases/#common-use-cases","title":"Common Use Cases","text":"

      To understand common use cases, we assume you read the basics.

      "},{"location":"tutorials/common-use-cases/#redirect-handling","title":"Redirect handling","text":"

      If you want to do a redirect from a route, you can use the redirectTo() filter in combination with the <shunt> backend. If you do not specify a path in your redirect, then the path from the client will be passed further and not modified by the redirect.

      Example:

      % ./bin/skipper -address :8080 -inline-routes 'r: * -> redirectTo(308, \"http://127.0.0.1:9999\") -> <shunt>'\n::1 - - [01/Nov/2018:18:42:02 +0100] \"GET / HTTP/1.1\" 308 0 \"-\" \"curl/7.49.0\" 0 localhost:8080 - -\n::1 - - [01/Nov/2018:18:42:08 +0100] \"GET /foo HTTP/1.1\" 308 0 \"-\" \"curl/7.49.0\" 0 localhost:8080 - -\n\n% curl localhost:8080 -v\n* Rebuilt URL to: localhost:8080/\n*   Trying ::1...\n* Connected to localhost (::1) port 8080 (#0)\n> GET / HTTP/1.1\n> Host: localhost:8080\n> User-Agent: curl/7.49.0\n> Accept: */*\n>\n< HTTP/1.1 308 Permanent Redirect\n< Location: http://127.0.0.1:9999/\n< Server: Skipper\n< Date: Thu, 01 Nov 2018 17:42:18 GMT\n< Content-Length: 0\n<\n* Connection #0 to host localhost left intact\n\n% curl localhost:8080/foo -v\n*   Trying ::1...\n* Connected to localhost (::1) port 8080 (#0)\n> GET /foo HTTP/1.1\n> Host: localhost:8080\n> User-Agent: curl/7.49.0\n> Accept: */*\n>\n< HTTP/1.1 308 Permanent Redirect\n< Location: http://127.0.0.1:9999/foo\n< Server: Skipper\n< Date: Thu, 01 Nov 2018 17:42:14 GMT\n< Content-Length: 0\n<\n* Connection #0 to host localhost left intact\n
      "},{"location":"tutorials/common-use-cases/#set-absolute-path","title":"set absolute path","text":"

      If you set a path, in this example /, in your redirect definition, then the path is set to the chosen value. The Location header is set in the response to /, but the client sent /foo.

      % ./bin/skipper -address :8080 -inline-routes 'r: * -> redirectTo(308, \"http://127.0.0.1:9999/\") -> <shunt>'\n\n% curl localhost:8080/foo -v\n*   Trying ::1...\n* Connected to localhost (::1) port 8080 (#0)\n> GET /foo HTTP/1.1\n> Host: localhost:8080\n> User-Agent: curl/7.49.0\n> Accept: */*\n>\n< HTTP/1.1 308 Permanent Redirect\n< Location: http://127.0.0.1:9999/\n< Server: Skipper\n< Date: Thu, 01 Nov 2018 17:47:17 GMT\n< Content-Length: 0\n<\n* Connection #0 to host localhost left intact\n
      "},{"location":"tutorials/common-use-cases/#change-base-path","title":"change base path","text":"

      If you want a redirect definition that adds a base path and the specified path by the client should be appended to this base path you can use the modPath filter just before the redirectTo() to modify the base path as you like.

      Route Example shows, that calls to /a/base/foo/bar would be redirected to https://another-example.com/my/new/base/foo/bar:

      redirect: Path(\"/a/base/\")\n          -> modPath(\"/a/base/\", \"/my/new/base/\")\n          -> redirectTo(308, \"https://another-example.com\")\n          -> <shunt>'\n

      The next example shows how to test a redirect with changed base path on your computer:

      % ./bin/skipper -address :8080 -inline-routes 'r: * -> modPath(\"/\", \"/my/new/base/\") -> redirectTo(308, \"http://127.0.0.1:9999\") -> <shunt>'\n::1 - - [01/Nov/2018:18:49:45 +0100] \"GET /foo HTTP/1.1\" 308 0 \"-\" \"curl/7.49.0\" 0 localhost:8080 - -\n\n% curl localhost:8080/foo -v\n*   Trying ::1...\n* Connected to localhost (::1) port 8080 (#0)\n> GET /foo HTTP/1.1\n> Host: localhost:8080\n> User-Agent: curl/7.49.0\n> Accept: */*\n>\n< HTTP/1.1 308 Permanent Redirect\n< Location: http://127.0.0.1:9999/my/new/base/foo\n< Server: Skipper\n< Date: Thu, 01 Nov 2018 17:49:45 GMT\n< Content-Length: 0\n<\n* Connection #0 to host localhost left intact\n
      "},{"location":"tutorials/development/","title":"Development","text":""},{"location":"tutorials/development/#local-setup","title":"Local Setup","text":""},{"location":"tutorials/development/#build-skipper-binary","title":"Build Skipper Binary","text":"

      Clone repository and compile with Go.

      git clone https://github.com/zalando/skipper.git\ncd skipper\nmake skipper\n

      binary will be ./bin/skipper

      "},{"location":"tutorials/development/#run-skipper-as-proxy-with-2-backends","title":"Run Skipper as Proxy with 2 backends","text":"

      As a small example, we show how you can run one proxy skipper and 2 backend skippers.

      Start the proxy that listens on port 9999 and serves all requests with a single route, that proxies to two backends using the round robin algorithm:

      ./bin/skipper -inline-routes='r1: * -> <roundRobin, \"http://127.0.0.1:9001\", \"http://127.0.0.1:9002\">' --address :9999\n

      Start two backends, with similar routes, one responds with \u201c1\u201d and the other with \u201c2\u201d in the HTTP response body:

      ./bin/skipper -inline-routes='r1: * -> inlineContent(\"1\") -> <shunt>' --address :9001 &\n./bin/skipper -inline-routes='r1: * -> inlineContent(\"2\") -> <shunt>' --address :9002\n

      Test the proxy with curl as a client:

      curl -s http://localhost:9999/foo\n1\ncurl -s http://localhost:9999/foo\n2\ncurl -s http://localhost:9999/foo\n1\ncurl -s http://localhost:9999/foo\n2\n

      "},{"location":"tutorials/development/#debugging-skipper","title":"Debugging Skipper","text":"

      It can be helpful to run Skipper in a debug session locally that enables one to inspect variables and do other debugging activities in order to analyze filter and token states.

      For Visual Studio Code users, a simple setup could be to create the following launch configuration that compiles Skipper, runs it in a Delve debug session, and then opens the default web browser creating the request. By setting a breakpoint, you can inspect the state of the filter or application. This setup is especially useful when inspecting oauth flows and tokens as it allows stepping through the states.

      Example `.vscode/launch.json` file
      {\n    \"version\": \"0.2.0\",\n    \"configurations\": [\n        {\n            \"name\": \"Launch Package\",\n            \"type\": \"go\",\n            \"request\": \"launch\",\n            \"mode\": \"debug\",\n            \"program\": \"${workspaceFolder}/cmd/skipper/main.go\",\n            \"args\": [\n                \"-application-log-level=debug\",\n                \"-address=:9999\",\n                \"-inline-routes=PathSubtree(\\\"/\\\") -> inlineContent(\\\"Hello World\\\") -> <shunt>\",\n               // example OIDC setup, using https://developer.microsoft.com/en-us/microsoft-365/dev-program\n               //  \"-oidc-secrets-file=${workspaceFolder}/.vscode/launch.json\",\n               //  \"-inline-routes=* -> oauthOidcAnyClaims(\\\"https://login.microsoftonline.com/<tenant Id>/v2.0\\\",\\\"<application id>\\\",\\\"<client secret>\\\",\\\"http://localhost:9999/authcallback\\\", \\\"profile\\\", \\\"\\\", \\\"\\\", \\\"x-auth-email:claims.email x-groups:claims.groups\\\") -> inlineContent(\\\"restricted access\\\") -> <shunt>\",\n            ],\n            \"serverReadyAction\": {\n                \"pattern\": \"route settings applied\",\n                \"uriFormat\": \"http://localhost:9999\",\n                \"action\": \"openExternally\"\n            }\n        }\n    ]\n}\n
      "},{"location":"tutorials/development/#docs","title":"Docs","text":"

      We have user documentation and developer documentation separated. In docs/ you find the user documentation in mkdocs format and rendered at https://opensource.zalando.com/skipper which is updated automatically with each docs/ change merged to master branch. Developer documentation for skipper as library users godoc format is used and rendered at https://godoc.org/github.com/zalando/skipper.

      "},{"location":"tutorials/development/#user-documentation","title":"User documentation","text":"

      To see rendered documentation locally run mkdocs serve and navigate to http://127.0.0.1:8000.

      "},{"location":"tutorials/development/#filters","title":"Filters","text":"

      Filters allow to change arbitrary HTTP data in the Request or Response. If you need to read and write the http.Body, please make sure you discuss the use case before creating a pull request.

      A filter consists of at least two types a filters.Spec and a filters.Filter. Spec consists of everything that is needed and known before a user will instantiate a filter.

      A spec will be created in the bootstrap procedure of a skipper process. A spec has to satisfy the filters.Spec interface Name() string and CreateFilter([]interface{}) (filters.Filter, error).

      The actual filter implementation has to satisfy the filter.Filter interface Request(filters.FilterContext) and Response(filters.FilterContext).

      The simplest filter possible is, if filters.Spec and filters.Filter are the same type:

      type myFilter struct{}\n\nfunc NewMyFilter() *myFilter {\n    return &myFilter{}\n}\n\nfunc (spec *myFilter) Name() string { return \"myFilter\" }\n\nfunc (spec *myFilter) CreateFilter(config []interface{}) (filters.Filter, error) {\n     return NewMyFilter(), nil\n}\n\nfunc (f *myFilter) Request(ctx filters.FilterContext) {\n     // change data in ctx.Request() for example\n}\n\nfunc (f *myFilter) Response(ctx filters.FilterContext) {\n     // change data in ctx.Response() for example\n}\n

      Find a detailed example at how to develop a filter.

      "},{"location":"tutorials/development/#filters-with-cleanup","title":"Filters with cleanup","text":"

      Sometimes your filter needs to clean up resources on shutdown. In Go, functions that do this often have the name Close(). There is the filters.FilterCloser interface; if you comply with it, the routing.Route will make sure your filters are closed in case the routing.Routing was closed.

      type myFilter struct{}\n\nfunc NewMyFilter() *myFilter {\n    return &myFilter{}\n}\n\nfunc (spec *myFilter) Name() string { return \"myFilter\" }\n\nfunc (spec *myFilter) CreateFilter(config []interface{}) (filters.Filter, error) {\n     return NewMyFilter(), nil\n}\n\nfunc (f *myFilter) Request(ctx filters.FilterContext) {\n     // change data in ctx.Request() for example\n}\n\nfunc (f *myFilter) Response(ctx filters.FilterContext) {\n     // change data in ctx.Response() for example\n}\n\nfunc (f *myFilter) Close() error {\n     // cleanup your filter\n}\n
      "},{"location":"tutorials/development/#filters-with-error-handling","title":"Filters with error handling","text":"

      Sometimes you want to have a filter that wants to get called Response() even if the proxy will not send a response from the backend, for example you want to count error status codes, like the admissionControl filter. In this case you need to comply with the following proxy interface:

      // errorHandlerFilter is an opt-in for filters to get called\n// Response(ctx) in case of errors.\ntype errorHandlerFilter interface {\n    // HandleErrorResponse returns true in case a filter wants to get called\n    HandleErrorResponse() bool\n}\n

      Example:

      type myFilter struct{}\n\nfunc NewMyFilter() *myFilter {\n    return &myFilter{}\n}\n\nfunc (spec *myFilter) Name() string { return \"myFilter\" }\n\nfunc (spec *myFilter) CreateFilter(config []interface{}) (filters.Filter, error) {\n     return NewMyFilter(), nil\n}\n\nfunc (f *myFilter) Request(ctx filters.FilterContext) {\n     // change data in ctx.Request() for example\n}\n\nfunc (f *myFilter) Response(ctx filters.FilterContext) {\n     // change data in ctx.Response() for example\n}\n\nfunc (f *myFilter) HandleErrorResponse() bool {\n     return true\n}\n

      "},{"location":"tutorials/development/#predicates","title":"Predicates","text":"

      Predicates allow to match a condition, that can be based on arbitrary HTTP data in the Request. There are also predicates, that use a chance Traffic() or the current local time, for example After(), to match a request and do not use the HTTP data at all.

      A predicate consists of at least two types routing.Predicate and routing.PredicateSpec, which are both interfaces.

      A spec will be created in the bootstrap procedure of a skipper process. A spec has to satisfy the routing.PredicateSpec interface Name() string and Create([]interface{}) (routing.Predicate, error).

      The actual predicate implementation has to satisfy the routing.Predicate interface Match(*http.Request) bool and returns true if the predicate matches the request. If false is returned, the routing table will be searched for another route that might match the given request.

      The simplest possible predicate implementation is, if routing.PredicateSpec and routing.Predicate are the same type:

      type myPredicate struct{}\n\nfunc NewMyPredicate() routing.PredicateSpec {\n    return &myPredicate{}\n}\n\nfunc (spec *myPredicate) Name() string { return \"myPredicate\" }\n\nfunc (spec *myPredicate) Create(config []interface{}) (routing.Predicate, error) {\n     return NewMyPredicate(), nil\n}\n\nfunc (f *myPredicate) Match(r *http.Request) bool {\n     // match data in *http.Request for example\n     return true\n}\n

      Predicates are quite similar to implement as Filters, so for a more complete example, find an example how to develop a filter.

      "},{"location":"tutorials/development/#dataclients","title":"Dataclients","text":"

      Dataclients are the way to integrate new route sources. Dataclients pull information from a source and create routes for skipper\u2019s routing table.

      You have to implement routing.DataClient, which is an interface that defines function signatures LoadAll() ([]*eskip.Route, error) and LoadUpdate() ([]*eskip.Route, []string, error).

      The LoadUpdate() method can be implemented either in a way that returns immediately, or blocks until there is a change. The routing package will regularly call the LoadUpdate() method with a small delay between the calls.

      A complete example is the routestring implementation, which fits in less than 50 lines of code.

      "},{"location":"tutorials/development/#opentracing","title":"Opentracing","text":"

      Your custom Opentracing implementations need to satisfy the opentracing.Tracer interface from https://github.com/opentracing/opentracing-go and need to be loaded as a plugin, which might change in the future. Please check the tracing package and ask for further guidance in our community channels.

      "},{"location":"tutorials/development/#core","title":"Core","text":"

      Non trivial changes, proposals and enhancements to the core of skipper should be discussed first in a Github issue, such that we can think about how this fits best in the project and how to achieve the most useful result. Feel also free to reach out to our community channels and discuss there your idea.

      Every change in core has to have tests included and should be a non breaking change. We planned since a longer time a breaking change, but we should coordinate to make it as good as possible for all skipper as library users. Most often a breaking change can be postponed to the future and a feature independently added and the old feature might be deprecated to delete it later. Use of deprecated features should be shown in logs with a log.Warning.

      "},{"location":"tutorials/operations/","title":"Operations","text":""},{"location":"tutorials/operations/#kubernetes","title":"Kubernetes","text":"

      In the beginning we chose to run Skipper as daemonset to run it on all worker nodes. Since 2018 we run Skipper as deployment with an hpa, horizontal Pod autoscaler, to scale Skipper by CPU usage. All our clusters are using AWS autoscaling groups (ASG), to increase and decrease the number of running nodes in a cluster based on use.

      In both deployment styles we run Skipper with hostnetwork: true and point the loadbalancer in front of it to the skipper port of all worker nodes. In our case we run an AWS Application loadbalancer (ALB) in front, and we terminate TLS on the ALB. A health check from the ALB detects, if Skipper is running on a worker node or not.

      "},{"location":"tutorials/operations/#first-steps","title":"First steps","text":"

      The next part will show you how to run Skipper with a minimal feature set, that supports already most of the features.

      A minimal set of arguments that should be chosen to support most Kubernetes use cases:

                - \"skipper\"\n          - \"-kubernetes\"\n          - \"-kubernetes-in-cluster\"\n          - \"-kubernetes-path-mode=path-prefix\"\n          - \"-address=:9999\"\n          - \"-wait-first-route-load\"\n          - \"-proxy-preserve-host\"\n          - \"-enable-ratelimits\"\n          - \"-experimental-upgrade\"\n          - \"-lb-healthcheck-interval=3s\"\n          - \"-metrics-flavour=prometheus\"\n          - \"-metrics-exp-decay-sample\"\n          - \"-serve-host-metrics\"\n          - \"-disable-metrics-compat\"\n          - \"-enable-connection-metrics\"\n          - \"-histogram-metric-buckets=.0001,.00025,.0005,.00075,.001,.0025,.005,.0075,.01,.025,.05,.075,.1,.2,.3,.4,.5,.75,1,2,3,4,5,7,10,15,20,30,60,120,300,600\"\n          - \"-max-audit-body=0\"\n          - \"-idle-timeout-server=62s\"\n

      Skipper started with these options will support instance based ratelimits, a wide range of Prometheus metrics, websockets and a better HTTP path routing than the default Kubernetes Ingress spec supports.

      The former Kubernetes Ingress v1beta1 spec defined a path as regular expression, which is not what most people would expect, nor want. Skipper defaults in Kubernetes to use the PathRegexp predicate for routing, because of the spec. We believe the better default is the path prefix mode, that uses PathSubtree predicate, instead. Path prefix search is much more scalable and can not lead to unexpected results by not so experienced regular expressions users. Since Kubernetes v1.18, Ingress v1 path definition supports all path matching modes that are common in skipper:

      • pathType: Exact maps to Path()
      • pathType: Prefix maps to PathSubtree()
      • pathType: ImplementationSpecific is defined as you set path prefix mode.

      To find more information about Metrics, including formats and example Prometheus queries you find in the metrics section. The settings shown above support system and application metrics to carefully monitor Skipper and your backend applications. Backend application metrics get error rates and latency buckets based on host headers. The chosen options are a good setup to safely run all workloads from small to high traffic.

      The option -max-audit-body=0, won\u2019t log the HTTP body, if you would do audit logging, to have a safe default.

      The last option -idle-timeout-server=62s was chosen, because of a known issue, if you run in a multi layer loadbalancer, with ALBs in front of Skipper. ALBs idle connection timeout is 60s and AWS support told us to run the backends with a bigger timeout, than the ALB in front.

      "},{"location":"tutorials/operations/#opt-in-more-features","title":"Opt-In more features","text":""},{"location":"tutorials/operations/#reverse-source-predicate","title":"Reverse Source Predicate","text":"

      Depending on the HTTP loadbalancer in front of your Skippers, you might want to set -reverse-source-predicate. This setting reverses the lookup of the client IP to find it in the X-Forwarded-For header values. If you do not care about clientRatelimits based on X-Forwarded-For headers, you can also ignore this.

      "},{"location":"tutorials/operations/#cluster-ratelimit","title":"Cluster Ratelimit","text":"

      Ratelimits can be calculated for the whole cluster instead of having only the instance based ratelimits. The common term we use in skipper documentation is cluster ratelimit. There are two options, but we highly recommend the use of Redis based cluster ratelimits. To support redis based cluster ratelimits you have to use -enable-swarm and add a list of URLs to redis -swarm-redis-urls=skipper-ingress-redis-0.skipper-ingress-redis.kube-system.svc.cluster.local:6379,skipper-ingress-redis-1.skipper-ingress-redis.kube-system.svc.cluster.local:6379. We run redis as statefulset with a headless service to have predictable names. We chose to not use a persistent volume, because storing the data in memory is good enough for this use case.

      "},{"location":"tutorials/operations/#east-west","title":"East West","text":"

      Attention

      This feature is deprecated. Consider using EastWest Range.

      Skipper supports cluster internal service-to-service communication as part of running as an API Gateway with an East-West setup. You have to add -enable-kubernetes-east-west and optionally choose a domain -kubernetes-east-west-domain=.ingress.cluster.local. Be warned: There is a known bug, if you combine it with custom routes. You might want to consider EastWest Range.

      "},{"location":"tutorials/operations/#east-west-range","title":"East West Range","text":"

      Alternatively, you can use Kubernetes East West Range feature. Use the flag -kubernetes-east-west-range-domains to define the cluster internal domains -kubernetes-east-west-range-predicates to define the predicates that will be appended to every route identified as an internal domain. Differently from the -enable-kubernetes-east-west and the -kubernetes-east-west-domain=.ingress.cluster.local flags (check East West) this feature will not automatically create routes for you and both features shouldn\u2019t be used in combination. The ingress and/or route groups resources must opt-in for east west range routes, explicitly defining them. For example, given that Skipper was initialized with the following east-west range flags:

      skipper \\\n  -kubernetes-east-west-range-domains=\"ingress.cluster.local\" \\\n  -kubernetes-east-west-range-predicates='ClientIP(\"10.2.0.0/16\")'\n

      and the following ingress is defined:

      apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: qux\n  namespace: foo\nspec:\n  rules:\n  - host: example.ingress.cluster.local\n    http:\n      paths:\n      - path: \"/\"\n        pathType: Prefix\n        backend:\n          service:\n            name: qux\n            port:\n              name: baz\n

      Skipper will secure this route adding the predicate ClientIP(\"10.2.0.0/16\").

      The same ingress might be used for internal and external hostnames. For example, given a slightly modified version of the ingress:

      apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n...\nspec:\n  rules:\n  - host: example.ingress.cluster.local\n    http: ...\n  - host: example.mydomain.org\n    http: ...\n

      will make the service accessible through example.ingress.cluster.local and example.mydomain.org, but the first hostname will only accept connections from the network 10.2.0.0/16, on this specific scenario.

      You can specify multiple east-west range domains and predicates:

      skipper \\\n  -kubernetes-east-west-range-domains=\"ingress.cluster.local,another.cluster.local\" \\\n  -kubernetes-east-west-range-predicates='ClientIP(\"10.2.0.0/16\") && SourceLastFrom(\"10.2.0.0/16\")'\n
      "},{"location":"tutorials/operations/#api-monitoring-and-auth","title":"API monitoring and Auth","text":"

      As part of API Gateway features, skipper supports API monitoring and common authentication and authorization protocols in Microservices architectures.

      "},{"location":"tutorials/operations/#opentracing","title":"OpenTracing","text":"

      Skipper has support for different OpenTracing API vendors, including jaeger, lightstep and instana. For example to configure the lightstep opentracing plugin, with a searchable component and cluster tag you can use: - \"-opentracing=lightstep component-name=skipper-ingress token=$(LIGHTSTEP_TOKEN) collector=tracing-collector.endpoint:8444 cmd-line=skipper-ingress max-buffered-spans=4096 tag=cluster=mycluster\". The LIGHTSTEP_TOKEN is passed as environment variable to the process.

      "},{"location":"tutorials/operations/#global-default-filters","title":"Global default filters","text":"

      Skipper can also add global default filters, which will be automatically added to all routes. For example you can use -default-filters-prepend=\"enableAccessLog(4,5)\" to enable only access logs in case of HTTP codes 4xx or 5xx. In the specific case of *AccessLog filters and -default-filters-prepend, the default choice can be overridden by users via zalando.org/skipper-filter ingress annotation.

      "},{"location":"tutorials/operations/#production-example","title":"Production example","text":"

      A full production deployment example you find at Zalando\u2019s configuration repository.

      "},{"location":"tutorials/operations/#recommendations","title":"Recommendations","text":"

      We recommend to run a loadbalancer in front of Skipper to terminate TLS, such that cluster users can not access your keys and certificates. While skipper supports SNI, hardware and cloud loadbalancers often have hardware support to terminate TLS. It\u2019s cheaper for you to offload TLS to these devices and trust your compute vendor.

      We recommend to start simple and grow the feature set from there. Check features, that are used in >60 production clusters in Zalando\u2019s configuration repository.

      "},{"location":"tutorials/operations/#dashboards","title":"Dashboards","text":"

      As an operator, build a Skipper dashboard and learn how Skipper and the Go runtime behaves with your workload. We successfully ran several load tests from 0 to 25k requests per seconds. The load test was ramping up in less than a minute with initially 3 Skipper Pods, with an HPA that has CPU target value of 100%.

      Operations dashboard:

      Application metrics dashboard:

      "},{"location":"tutorials/ratelimit/","title":"Ratelimits","text":""},{"location":"tutorials/ratelimit/#overview","title":"Overview","text":"

      Ratelimits are calculated for a number of requests and a time.Duration for a given bucket. To enable rate limits you need to run skipper with -enable-ratelimits.

      A time.Duration is specified as string and can for example be \u201c10s\u201d for ten seconds, \u201c5m\u201d for five minutes or \u201c2h\u201d for two hours.

      As bucket skipper can use either the backend or some client information.

      In case of a backend ratelimit the bucket is only one global for one route.

      In case of a client ratelimit the buckets are created by the used ratelimit.Lookuper, which defaults to the X-Forwarded-For header, but can be also the Authorization header. So for the client ratelimit with X-Forwarded-For header, the client IP that the first proxy in the list sees will be used to lookup the bucket to count requests.

      "},{"location":"tutorials/ratelimit/#instance-local-ratelimit","title":"Instance local Ratelimit","text":"

      Filters ratelimit() and clientRatelimit() calculate the ratelimit in a local view having no information about other skipper instances.

      "},{"location":"tutorials/ratelimit/#backend-ratelimit","title":"Backend Ratelimit","text":"

      The backend ratelimit filter is ratelimit() and it is the simplest one. You can define how many requests a route allows for a given time.Duration to send to all backends of the route. This means that you can not limit traffic to a single backend instance.

      For example to limit the route to 10 requests per minute for each skipper instance, you can specify:

      ratelimit(10, \"1m\")\n
      "},{"location":"tutorials/ratelimit/#client-ratelimit","title":"Client Ratelimit","text":"

      The client ratelimit filter is clientRatelimit() and it uses information from the request to find the bucket which will get the increased request count.

      For example to limit the route to 10 requests per minute for each skipper instance for the same client selected by the X-Forwarded-For header, you can specify:

      clientRatelimit(10, \"1m\")\n

      There is an optional third argument that selects the same client by HTTP header value. As an example for Authorization Header you would use:

      clientRatelimit(10, \"1m\", \"Authorization\")\n

      The optional third argument can create an AND combined Header ratelimit. The header names must be separated by ,. For example all of the specified headers have to be the same to recognize them as the same client:

      clientRatelimit(10, \"1m\", \"X-Forwarded-For,Authorization,X-Foo\")\n

      Internally skipper has a clean interval to clean up old buckets to reduce the memory footprint in the long run.

      "},{"location":"tutorials/ratelimit/#security-consideration","title":"Security Consideration","text":"

      ClientRatelimit works on data provided by the client. In theory an attacker can likely work around all of your configurations. On the other hand there is always a pattern in attacks, and you are more likely to be able to find the pattern and mitigate the attack, if you have a powerful tool like the provided clientRatelimit.

      "},{"location":"tutorials/ratelimit/#cluster-ratelimit","title":"Cluster Ratelimit","text":"

      A cluster ratelimit computes all requests for all skipper peers. This requires, that you run skipper with -enable-swarm and select one of the two implementations:

      • Redis
      • SWIM

      Make sure to meet all requirements, which depend on the chosen implementation and the dataclient in use.

      "},{"location":"tutorials/ratelimit/#redis-based-cluster-ratelimits","title":"Redis based Cluster Ratelimits","text":"

      This solution is independent of the dataclient being used. You have to run one or more Redis instances. See also Running with Redis based Cluster Ratelimits.

      There are 3 different configurations to assign Redis instances as a Skipper Redis swarm.

      "},{"location":"tutorials/ratelimit/#static","title":"Static","text":"

      Specify -swarm-redis-urls, multiple instances can be separated by comma, for example: -swarm-redis-urls=redis1:6379,redis2:6379. Use this if you don\u2019t need to scale your Redis instances.

      "},{"location":"tutorials/ratelimit/#kubernetes-service-selector","title":"Kubernetes Service Selector","text":"

      Specify -kubernetes-redis-service-namespace=<namespace>, -kubernetes-redis-service-name=<name> and optional -kubernetes-redis-service-port=<port number>.

      Skipper will update Redis addresses every 10 seconds from specified service endpoints. This allows you to dynamically scale Redis instances. Note that when -kubernetes is set Skipper also fetches Ingresses and RouteGroups for routing, see ingress-controller deployment docs.

      "},{"location":"tutorials/ratelimit/#http-endpoint","title":"HTTP Endpoint","text":"

      Specify -swarm-redis-remote=http://127.0.0.1/redis/endpoints,

      Skipper will update Redis addresses every 10 seconds from this remote URL that should return data in the following JSON format:

      {\n    \"endpoints\": [\n        {\"address\": \"10.2.0.1:6379\"}, {\"address\": \"10.2.0.2:6379\"},\n        {\"address\": \"10.2.0.3:6379\"}, {\"address\": \"10.2.0.4:6379\"},\n        {\"address\": \"10.2.0.5:6379\"}\n    ]\n}\n

      If you have routesrv proxy enabled, you need to configure Skipper with the flag -swarm-redis-remote=http://<routesrv-service-name>.<routesrv-namespace>.svc.cluster.local/swarm/redis/shards. Routesrv will be responsible for collecting Redis endpoints and Skipper will poll them from it.

      "},{"location":"tutorials/ratelimit/#implementation","title":"Implementation","text":"

      The implementation uses a Redis ring to shard via client hashing and spread the load across multiple Redis instances, in order to scale out the shared storage.

      The ratelimit algorithm is a sliding window and makes use of the following Redis commands:

      • ZREMRANGEBYSCORE,
      • ZCARD,
      • ZADD and
      • ZRANGEBYSCORE

      "},{"location":"tutorials/ratelimit/#swim-based-cluster-ratelimits","title":"SWIM based Cluster Ratelimits","text":"

      SWIM is a \u201cScalable Weakly-consistent Infection-style Process Group Membership Protocol\u201d, which is very interesting to use for cluster ratelimits. The implementation has some weaknesses in the algorithm, that lead sometimes to too much ratelimits or too few and therefore is not considered to be stable. For running skipper in Kubernetes with this, see also Running with SWIM based Cluster Ratelimits

      In case of Kubernetes you might specify additionally -swarm-label-selector-key, which defaults to \u201capplication\u201d and -swarm-label-selector-value, which defaults to \u201cskipper-ingress\u201d and -swarm-namespace, which defaults to \u201ckube-system\u201d.

      The following shows the setup of a SWIM based cluster ratelimit:

      "},{"location":"tutorials/ratelimit/#backend-ratelimit_1","title":"Backend Ratelimit","text":"

      The backend ratelimit filter is clusterRatelimit(). You can define how many requests a route allows for a given time.Duration in total for all skipper instances summed up. The first parameter is the group parameter, which can be used to select the same ratelimit group across one or more routes

      For example rate limit \u201cgroupA\u201d limits the rate limit group to 10 requests per minute in total for the cluster, you can specify:

      clusterRatelimit(\"groupA\", 10, \"1m\")\n
      "},{"location":"tutorials/ratelimit/#client-ratelimit_1","title":"Client Ratelimit","text":"

      The client ratelimit filter is clusterClientRatelimit() and it uses information from the request to find the bucket which will get the increased request count. You can define how many requests a client is allowed to hit this route for a given time.Duration in total for all skipper instances summed up. The first parameter is the group parameter, which can be used to select the same ratelimit group across one or more routes

      For example rate limit \u201cgroupB\u201d limits the rate limit group to 10 requests per minute for the full skipper swarm for the same client selected by the X-Forwarded-For header, you can specify:

      clusterClientRatelimit(\"groupB\", 10, \"1m\")\n

      The same for Authorization Header you would use:

      clusterClientRatelimit(\"groupC\", 10, \"1m\", \"Authorization\")\n

      The optional fourth argument can create an AND combined Header ratelimit. The header names must be separated by ,. For example all of the specified headers have to be the same to recognize them as the same client:

      clusterClientRatelimit(\"groupC\", 5, \"10s\", \"X-Forwarded-For,Authorization,X-Foo\")\n

      Internally skipper has a clean interval to clean up old buckets to reduce the memory footprint in the long run.

      "},{"location":"tutorials/ratelimit/#security-consideration_1","title":"Security Consideration","text":"

      ClusterClientRatelimit works on data provided by the client. In theory an attacker can likely work around all of your configurations. On the other hand there is always a pattern in attacks, and you are more likely to be able to find the pattern and mitigate the attack, if you have a powerful tool like the provided clusterClientRatelimit.

      "},{"location":"tutorials/shadow-traffic/","title":"Shadow Traffic","text":"

      This tutorial will show how to setup routing for shadow traffic, where one backend (main) will receive the full traffic, while a shadowing backend (test) will receive only a certain percentage of the same traffic.

      "},{"location":"tutorials/shadow-traffic/#used-predicates","title":"Used Predicates:","text":"
      • Tee
      • Traffic
      "},{"location":"tutorials/shadow-traffic/#used-filters","title":"Used Filters:","text":"
      • teeLoopback
      "},{"location":"tutorials/shadow-traffic/#1-initial-state","title":"1. Initial state","text":"

      Before the shadow traffic, we are sending all traffic to the main backend.

      main: * -> \"https://main.example.org\";\n
      "},{"location":"tutorials/shadow-traffic/#2-clone-the-main-route-handling-10-of-the-traffic","title":"2. Clone the main route, handling 10% of the traffic","text":"

      Before generating the shadow traffic, we create an identical clone of the main route that will handle only 10% of the traffic, while the rest stays being handled by the main route.

      main: * -> \"https://main.example.org\";\nsplit: Traffic(.1) -> \"https://main.example.org\";\n
      "},{"location":"tutorials/shadow-traffic/#3-prepare-the-route-for-the-shadow-traffic","title":"3. Prepare the route for the shadow traffic","text":"

      The route introduced next won\u2019t handle directly any incoming requests, because they won\u2019t be matched by the Tee predicate, but it is prepared to send tee requests to the alternative, \u2018shadow\u2019 backend.

      main: * -> \"https://main.example.org\";\nsplit: Traffic(.1) -> \"https://main.example.org\";\nshadow: Tee(\"shadow-test-1\") && True() -> \"https://shadow.example.org\";\n
      "},{"location":"tutorials/shadow-traffic/#4-apply-the-teeloopback-filter","title":"4. Apply the teeLoopback filter","text":"

      Now we can apply the teeLoopback filter to the \u2018split\u2019 route, using the same label as we did in the Tee predicate.

      main: * -> \"https://main.example.org\";\nsplit: Traffic(.1) -> teeLoopback(\"shadow-test-1\") -> \"https://main.example.org\";\nshadow: Tee(\"shadow-test-1\") && True() -> \"https://shadow.example.org\";\n

      Note that as of now, we need to increase the weight of the \u2018shadow\u2019 route by adding the True() predicate in order to avoid that the \u2018split\u2019 route would match the cloned request again.

      After this, the \u2018split\u2019 route will still send all the handled requests, 10% of the total traffic, to the main backend, while the rest of the traffic is routed there by the \u2018main\u2019 route. However, the teeLoopback filter will also clone the traffic of the \u2018split\u2019 route, 10% of the total, and reapply the routing on it, during which these requests will be matched by the Tee predicate, and sent to the shadow backend.

      "}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 0000000000..50110fec7e --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,163 @@ + + + + https://opensource.zalando.com/skipper/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/data-clients/eskip-file/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/data-clients/eskip-remote/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/data-clients/etcd/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/data-clients/kubernetes/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/data-clients/route-string/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/kubernetes/east-west-usage/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/kubernetes/external-addresses/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/kubernetes/ingress-backends/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/kubernetes/ingress-controller/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/kubernetes/ingress-usage/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/kubernetes/routegroup-crd/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/kubernetes/routegroup-validation/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/kubernetes/routegroups/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/operation/deployment/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/operation/operation/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/reference/architecture/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/reference/backends/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/reference/development/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/reference/egress/ + 2024-05-30 + daily + + + 
https://opensource.zalando.com/skipper/reference/filters/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/reference/plugins/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/reference/predicates/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/reference/scripts/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/tutorials/auth/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/tutorials/basics/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/tutorials/built-your-own/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/tutorials/common-use-cases/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/tutorials/development/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/tutorials/operations/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/tutorials/ratelimit/ + 2024-05-30 + daily + + + https://opensource.zalando.com/skipper/tutorials/shadow-traffic/ + 2024-05-30 + daily + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 0000000000..3f0a55fdbd Binary files /dev/null and b/sitemap.xml.gz differ diff --git a/skipper-h180.png b/skipper-h180.png new file mode 100644 index 0000000000..e1c1ab2281 Binary files /dev/null and b/skipper-h180.png differ diff --git a/tutorials/auth/index.html b/tutorials/auth/index.html new file mode 100644 index 0000000000..ba8d1c5ac8 --- /dev/null +++ b/tutorials/auth/index.html @@ -0,0 +1,2029 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Authentication and Authorization - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + +
      + +
      + + + + + + +
      +
      + + + +
      +
      +
      + + + + + +
      +
      +
      + + + + + + + +
      +
      + + + + + + + +

      Authentication and Authorization

      + +

      Basic auth

      +

      Basic Auth is defined in RFC7617.

      +

      Install the htpasswd command line tool; we assume a Debian based +system. Please refer to the documentation of your Operating System or +package management vendor on how to install htpasswd:

      +
      apt-get install apache2-utils
      +
      +

      Create a htpasswd file foo.passwd and use captain with password apassword:

      +
      htpasswd -bcB foo.passwd captain apassword
      +
      +

      Start skipper with a basicAuth filter referencing the just created +htpasswd file:

      +
      ./bin/skipper -address :8080 -inline-routes 'r: * -> basicAuth("foo.passwd") -> status(200) -> <shunt>'
      +
      +

      A client request without login credentials or wrong credentials:

      +
      % curl localhost:8080/ -v
      +*   Trying ::1...
      +* Connected to localhost (::1) port 8080 (#0)
      +> GET / HTTP/1.1
      +> Host: localhost:8080
      +> User-Agent: curl/7.49.0
      +> Accept: */*
      +>
      +< HTTP/1.1 401 Unauthorized
      +< Server: Skipper
      +< Www-Authenticate: Basic realm="Basic Realm"
      +< Date: Thu, 01 Nov 2018 21:27:18 GMT
      +< Content-Length: 0
      +<
      +* Connection #0 to host localhost left intact
      +
      +

      A client request with the correct credentials:

      +
      % curl captain:apassword@localhost:8080/ -v
      +*   Trying ::1...
      +* Connected to localhost (::1) port 8080 (#0)
      +* Server auth using Basic with user 'captain'
      +> GET / HTTP/1.1
      +> Host: localhost:8080
      +> Authorization: Basic Y2FwdGFpbjphcGFzc3dvcmQ=
      +> User-Agent: curl/7.49.0
      +> Accept: */*
      +>
      +< HTTP/1.1 200 OK
      +< Server: Skipper
      +< Date: Thu, 01 Nov 2018 21:29:21 GMT
      +< Content-Length: 0
      +<
      +* Connection #0 to host localhost left intact
      +
      +

      Token service-to-service

      +

      Service to service authentication and authorization is often done by +using the HTTP Authorization header with the content prefix “Bearer “, +for example “Authorization: Bearer mytoken”.

      +

      Supported token formats

      + +

      Tokeninfo

      +

      Tokeninfo is a common, but not specified protocol, only supporting +Bearer tokens in the Authorization header.

      +

      In most cases you would have to have your own OAuth2 token +infrastructure, that can return JWT or OAuth2 access tokens to authenticated parties +and validate tokens with their custom tokeninfo endpoint. In case of +JWT the access token is signed and can be validated without a central +tokeninfo endpoint.

      +

      Example route:

      +
      all: Path("/")
      +     -> oauthTokeninfoAnyScope("read-X", "readwrite-X")
      +     -> "http://localhost:9090/"
      +
      +

      The access token should be passed from the client as Bearer token in +the Authorization header. Skipper will send this token unchanged as +Bearer token in the Authorization header to the Tokeninfo endpoint. +The request flow with a Tokeninfo setup is shown in the following +picture:

      +

      Skipper with Tokeninfo

      +

      Tokenintrospection RFC7662

      +

      Tokenintrospection service to service authentication and authorization +is specified by RFC7662. +Skipper uses RFC Draft for discovering token infrastructure +configuration, +to find the introspection_endpoint.

      +

      Example route:

      +
      all: *
      +        -> oauthTokenintrospectionAnyKV("https://identity.example.com/managed-id", "jdoe")
      +        -> "http://localhost:9090/";
      +
      +

      The access token should be passed from the client as Bearer token in +the Authorization header. Skipper will send this token as +defined in RFC7662 +in a POST request “application/x-www-form-urlencoded” as value for key +token to the Tokenintrospection endpoint. +The request flow with Tokenintrospection setup is shown in the +following picture:

      +

      Skipper with Tokenintrospection

      +

      OpenID Connect

      +

      OpenID Connect is an OAuth2.0 based authentication and authorization mechanism supported by +several providers. Skipper can act as a proxy for backend server which requires authenticated clients. +Skipper handles the authentication with the provider and upon successful completion of authentication +passes subsequent requests to the backend server.

      +

      Skipper’s implementation of OpenID Connect Client works as follows:

      +
        +
      1. Filter is initialized with the following parameters:
          +
        1. Secrets file with keys used for encrypting the token in a cookie and also for generating shared secret.
        2. +
        3. OpenID Connect Provider URL
        4. +
        5. The Client ID
        6. +
        7. The Client Secret
        8. +
        9. The Callback URL for the client when a user successfully authenticates and is + returned.
        10. +
        11. The Scopes to be requested along with the openid scope
        12. +
        13. The claims that should be present in the token or the fields need in the user + information.
        14. +
        +
      2. +
      3. The user makes a request to a backend which is covered by an OpenID filter.
      4. +
      5. Skipper checks if a cookie is set with any previous successfully completed OpenID authentication.
      6. +
      7. If the cookie is valid then Skipper passes the request to the backend.
      8. +
      9. If the cookie is not valid then Skipper redirects the user to the OpenID provider with its Client ID and a callback URL.
      10. +
      11. When the user successfully completes authentication the provider redirects the user to the callback URL with a token.
      12. +
      13. Skipper receives this token and makes a backend channel call to get an ID token + and other required information.
      14. +
      15. If all the user information/claims are present then it encrypts this and sets a cookie + which is encrypted and redirects the user to the originally requested URL.
      16. +
      +

      To use OpenID define a filter for a backend which needs to be covered by OpenID Connect authentication.

      +
      oauthOidcAllClaims("https://accounts.identity-provider.com", "some-client-id",
      +    "some-client-secret", "http://callback.com/auth/provider/callback", "scope1 scope2",
      +    "claim1 claim2") -> "https://internal.example.org";
      +
      +

      Here scope1 scope2 are the scopes that should be included when requesting authentication from the OpenID provider. +Any number of scopes can be specified here. The openid scope is added automatically by the filter. The other fields +which need to be specified are the URL of the provider which in the above example is +https://accounts.identity-provider.com. The client ID and the client secret. The callback URL which is specified +while generating the client id and client secret. Then the scopes and finally the claims which should be present along +with the return id token.

      +
      oauthOidcUserInfo("https://oidc-provider.example.com", "client_id", "client_secret",
      +    "http://target.example.com/subpath/callback", "email profile",
      +    "name email picture") -> "https://internal.example.org";
      +
      +

      This filter is similar but it verifies that the token has certain user +information fields accessible with the token returned by the provider. The fields can +be specified at the end like in the example above where the fields name, email +and picture are requested.

      +

      Upon successful authentication Skipper will start allowing the user requests through +to the backend. Along with the original request to the backend Skipper will include +information which it obtained from the provider. The information is in JSON format +with the header name Skipper-Oidc-Info. In the case of the claims container the +header value is in the format.

      +
      {
      +    "oauth2token": "xxx",
      +    "claims": {
      +        "claim1": "val1",
      +        "claim2": "val2"
      +    },
      +    "subject": "subj"
      +}
      +
      +

      In the case of a user info filter the payload is in the format:

      +
      {
      +    "oauth2token": "xxx",
      +    "userInfo": {
      +        "sub": "sub",
      +        "profile": "prof",
      +        "email": "abc@example.com",
      +        "email_verified": "abc@example.com"
      +    },
      +    "subject": "subj"
      +}
      +
      +

      Skipper encrypts the cookies and also generates a nonce during the OAuth2.0 flow +for which it needs a secret key. This key is in a file which can be rotated periodically +because it is reread by Skipper. The path to this file must be passed with the flag +-oidc-secrets-file when Skipper is started.

      +

      AuthZ and access control

      +

      Authorization validation and access control is available by means of a subsequent filter oidcClaimsQuery. It inspects the ID token, which exists after a successful oauthOidc* filter step, and validates the defined query with the request path.

      +

      Given following example ID token:

      +
      {
      +  "email": "someone@example.org",
      +  "groups": [
      +    "CD-xyz",
      +    "appX-Tester"
      +  ],
      +  "name": "Some One"
      +}
      +
      +

      Access to path / would be granted to everyone in example.org, however path /login only to those being member of group "appX-Tester":

      +
      oauthOidcAnyClaims(...) -> oidcClaimsQuery("/login:groups.#[==\"appX-Tester\"]", "/:@_:email%\"*@example.org\"")
      +
      +

      OAuth2 authorization grant flow

      +

      Authorization grant flow is a mechanism +to coordinate between a user-agent, a client, and an authorization server to obtain an OAuth2 +access token for a user. Skipper supports the flow with the oauthGrant() filter. +It works as follows:

      +
        +
      1. A user makes a request to a route with oauthGrant().
      2. +
      3. The filter checks whether the request has a cookie called oauth-grant1. If it does not, or + if the cookie and its tokens are invalid, it redirects the user to the OAuth2 provider’s + authorization endpoint2.
      4. +
      5. The user logs into the external OAuth2 provider, e.g. by providing a username and password.
      6. +
      7. The provider redirects the user back to Skipper with an authorization code, using the + redirect_uri URL parameter which was part of the previous redirect2. The callback route must + have a grantCallback() filter defined. Skipper automatically adds this callback route for you + when the OAuth2 authorization grant flow feature is enabled. Note that the automatically added + callback route does not apply default filters. + If you need default filters to be applied to the callback route as well, please register + the route manually in your routes files.
      8. +
      9. Skipper calls the provider’s token URL with the authorization code, and receives a response + with the access and refresh tokens.
      10. +
      11. Skipper stores the tokens in an oauth-grant1 cookie which is stored in the user’s browser.
      12. +
      13. Subsequent calls to any route with an oauthGrant() filter will now pass as long as the + access token is valid.
      14. +
      +

      1 The name of this cookie can be changed by providing the -oauth2-token-cookie-name parameter.

      +

      2 The value of redirect_uri parameter of the authorization flow could be set by providing -oauth2-auth-url-parameters=redirect_uri=https://example.org/oauth-callback. + If not set Skipper will automatically determine it based on the initial request hostname and -oauth2-callback-path flag value.

      +

      Please note that it is not currently possible to use multiple OAuth2 providers with Skipper.

      + +

      The cookie set by the oauthGrant() filter contains the OAuth2 access and refresh tokens in +encrypted form. This means Skipper does not need to persist any session information about users, +while also not exposing the tokens to users.

      +

      Token refresh

      +

      The oauthGrant() filter also supports token refreshing. Once the access token expires and +the user makes another request, the filter automatically refreshes the token and sets the +updated cookie in the response.

      +

      Instructions

      +

      To use authorization grant flow, you need to:

      +
        +
      1. Configure OAuth2 credentials.
      2. +
      3. Configure the grant filters with OAuth2 URLs.
      4. +
      5. Add the OAuth2 grant filters to routes.
      6. +
      +

      Configure OAuth2 credentials

      +

      Before you start, you need to register your application with the OAuth2 provider. +If your provider asks you for the callback URL, provide the URL that you set +as the -oauth2-callback-path parameter. If you did not provide a value, use the default +route : /.well-known/oauth2-callback.

      +

      Skipper must be configured with the following credentials and secrets:

      +
        +
      1. OAuth2 client ID for authenticating with the OAuth2 provider.
      2. +
      3. OAuth2 client secret for authenticating with the OAuth2 provider.
      4. +
      5. Cookie encryption secret for encrypting and decrypting token cookies.
      6. +
      +

      You can load all of these secrets from separate files, in which case they get automatically +reloaded to support secret rotation. +You can provide the paths to the files containing each secret as follows:

      +
      skipper -oauth2-client-id-file=/path/to/client_id \
      +    -oauth2-client-secret-file=/path/to/client_secret \
      +    -oauth2-secret-file=/path/to/cookie_encryption_secret \
      +    -credentials-update-interval=30s
      +
      +

      Paths may contain {host} placeholder which will be replaced by the request host. +This is used to define separate credentials for different hosts.

      +

      Care must be taken when used in conjunction with -credentials-paths option because files +from -credentials-paths are available to bearerinjector filter. +That is -credentials-paths=/path/to in above example will expose grant files to bearerinjector filter.

      +

      You can modify the secret update interval using the -credentials-update-interval argument. In +example above, the interval is configured to reload the secrets from the files every 30 +seconds.

      +

      If you prefer, you can provide the client ID and secret values directly as arguments to +Skipper instead of loading them from files. In that case, call Skipper with:

      +
      skipper -oauth2-client-id=<CLIENT_ID> -oauth2-client-secret=<CLIENT_SECRET>
      +
      +

      Configure the grant filters

      +

      The grant filters need to be enabled and configured with your OAuth2 provider’s +authorization, token, and tokeninfo endpoints. This can be achieved by providing Skipper +with the following arguments:

      +
      skipper -enable-oauth2-grant-flow \
      +    -oauth2-auth-url=<OAUTH2_AUTHORIZE_ENDPOINT> \
      +    -oauth2-token-url=<OAUTH2_TOKEN_ENDPOINT> \
      +    -oauth2-revoke-token-url=<OAUTH2_REVOKE_TOKEN_ENDPOINT> \
      +    -oauth2-tokeninfo-url=<OAUTH2_TOKENINFO_ENDPOINT> \
      +    -oauth2-callback-path=/oauth/callback
      +
      +

      The -oauth2-revoke-token-url is optional, and should be supplied if you plan +to use the grantLogout filter to revoke tokens.

      +

      You can configure the oauthGrant() filter further for your needs. See the +oauthGrant filter reference for more details.

      +

      Add filters to your routes

      +

      You can protect any number of routes with the oauthGrant() filter. Unauthenticated users +will be refused access and redirected to log in.

      +

      Skipper will automatically add a callback route for you with the grantCallback filter registered +on it. The path for this route can be configured with the -oauth2-callback-path parameter. +If the parameter is not given, it will be /.well-known/oauth2-callback

      +

      You can optionally add a grantLogout() filter to delete token cookie. +If -oauth2-revoke-token-url is set it will revoke access and refresh tokens:

      +
      foo:
      +    Path("/foo")
      +    -> oauthGrant()
      +    -> "http://localhost:9090";
      +
      +logout:
      +    Path("/logout")
      +    -> grantLogout()
      +    -> redirectTo(302)
      +    -> <shunt>;
      +
      +

      (Optional) AuthZ and access control

      +

      You can add a grantClaimsQuery filter +after a oauthGrant to control access based +on any OAuth2 claim. A claim is any property returned by the tokeninfo endpoint. +The filter works exactly like the oidcClaimsQuery +filter (it is actually just an alias for it).

      +

      For example, if your tokeninfo endpoint returns the following JSON:

      +
      {
      +    "scope": ["email"],
      +    "username": "foo"
      +}
      +
      +

      you could limit the access to a given route only to users that have the email +scope by doing the following:

      +
        +
      1. Append a grantClaimsQuery filter to the oauthGrant filter with the following + query: +
        -> oauthGrant() -> grantClaimsQuery("/path:scope.#[==\"email\"]")
        +
      2. +
      3. Provide the name of the claim that corresponds to the OAuth2 subject in the + tokeninfo payload as an argument to Skipper: +
        skipper -oauth2-tokeninfo-subject-key=username
        +
      4. +
      +
      +

      The subject is the field that identifies the user and is often called sub, +especially in the context of OpenID Connect. In the example above, it is username.

      +
      +

      Open Policy Agent

      +

      To enable Open Policy Agent filter, use the -enable-open-policy-agent command line flag.

      +

      Open Policy Agent is integrated as a Go library so no extra setup is needed to run. Every filter creates a virtual OPA instance in memory that is configured using a configuration file in the same configuration format that a standalone OPA would use. To allow for configurability, the configuration file is interpolated using Go Templates to allow every virtual instance to pull different bundles. This template file is passed using the -open-policy-agent-config-template flag.

      +

      Configuration File

      +

      As an example the following initial config can be used

      +
      services:
      +  - name: bundle-service
      +    url: https://my-example-opa-bucket.s3.eu-central-1.amazonaws.com
      +    credentials:
      +      s3_signing:
      +        environment_credentials: {}
      +labels:
      +  environment: production
      +discovery:
      +  name: discovery
      +  prefix: "/applications/{{ .bundlename }}"
      +
      +

      The variable .bundlename is the first argument in the following filters and can be in any format that OPA can understand, so for example application IDs from a registry, uuids, …

      +

      Input Structures

      +

      Input structures to policies follow those that are used by the opa-envoy-plugin, the existing examples and documentation apply also to Skipper. Please note that the filters in Skipper always generate v3 input structures.

      +

      Passing context to the policy

      +

      Generally there are two ways to pass context to a policy:

      +
        +
      1. as part of the labels in Open Policy Agent (configured in the configuration file, see below) that should be used for deployment level taxonomy,
      2. +
      3. as part of so called context extensions that are part of the Envoy external auth specification.
      4. +
      +

      This context can be passed as second argument to filters:

      +

      opaAuthorizeRequest("my-app-id", "com.mycompany.myprop: myvalue") +or opaAuthorizeRequest("my-app-id", "{'com.mycompany.myprop': 'my value'}")

      +

      The second argument is parsed as YAML, cannot be nested and values need to be strings.

      +

      In Rego this can be used like this input.attributes.contextExtensions["com.mycompany.myprop"] == "my value"

      +

      Quick Start Rego Playground

      +

      A quick way without setting up Backend APIs is to use the Rego Playground.

      +

      To get started pick from examples Envoy > Hello World. Click on “Publish” and note the random ID in the section “Run OPA with playground policy”.

      +

      Place the following file in your local directory with the name opaconfig.yaml

      +
      bundles:
      +  play:
      +    resource: bundles/{{ .bundlename }}
      +    polling:
      +      long_polling_timeout_seconds: 45
      +services:
      +  - name: play
      +    url: https://play.openpolicyagent.org
      +plugins:
      +  envoy_ext_authz_grpc:
      +    # This needs to match the package, defaulting to envoy/authz/allow
      +    path: envoy/http/public/allow
      +    dry-run: false
      +decision_logs:
      +  console: true
      +
      +

      Start Skipper with

      +
      skipper -enable-open-policy-agent -open-policy-agent-config-template opaconfig.yaml \
      +  -inline-routes 'notfound: * -> opaAuthorizeRequest("<playground-bundle-id>") -> inlineContent("<h1>Authorized Hello</h1>") -> <shunt>'
      +
      +

      You can test the policy with

      +
        +
      • Authorized: curl http://localhost:9090/ -i
      • +
      • Authorized: curl http://localhost:9090/foobar -H "Authorization: Basic charlie" -i
      • +
      • Forbidden: curl http://localhost:9090/foobar -i
      • +
      + + + + + + + + + + + + + +
      +
      + + + +
      + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorials/basics/index.html b/tutorials/basics/index.html new file mode 100644 index 0000000000..83694c2983 --- /dev/null +++ b/tutorials/basics/index.html @@ -0,0 +1,2032 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Basics - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + +
      + +
      + + + + + + +
      +
      + + + +
      +
      +
      + + + + + +
      +
      +
      + + + + + + + +
      +
      + + + + + + + +

      Basics

      + +

      Architecture

      +

      The core business of skipper is routing based on HTTP. It performs and +scales well, for example it handles more than 800000 routes in +production with 60000 requests per second.

      +

      Skipper is written as a library and is also a multi binary project with +2 binaries, named skipper and eskip. Skipper is the HTTP proxy +and eskip is a CLI application to verify, print, update or delete +Skipper routes.

      +

      Skipper’s internal architecture is split into different packages. The +skipper package has connections to multiple dataclient, that pull +information from different sources, for example local routes from an +eskip file or dynamic routes from Kubernetes ingress objects.

      +

      The proxy package gets the routes populated by skipper and has +always a current routing table which will be replaced on change.

      +

      A route is one entry in the routing table. A route consists of one or +more predicate, that are used to find a route for a given HTTP +request. A route can also have one or more filter, that can modify +the content of the request or response. A route can point to a +backend, it can be a <shunt>, meaning that skipper serves the requests +for the route, a <loopback>, meaning that the requests will be +matched against the routing table again after filters have modified +them, or a <dynamic>, meaning that the target backend must be set in a filter.

      +

      Opentracing API is supported via +tracers and you can find all of them in ./tracing/tracers/. For +example Jaeger is supported.

      +

      Skipper has a rich set of metrics that are exposed as json, but can +also be exported in Prometheus format.

      +

      Skipper's architecture

      +

      Concepts

      +

      Route definition

      +

      A route consists of an ID, predicates, filters and a backend and is +most often written in eskip syntax.

      +

      Syntax:

      +
      ID:
      +        Predicate1() && .. && PredicateN()
      +        -> filter1()
      +        ...
      +        -> filterN()
      +        -> BACKEND
      +
      +

      An example routing configuration:

      +
      baidu:
      +        Path("/baidu")
      +        -> setRequestHeader("Host", "www.baidu.com")
      +        -> setPath("/s")
      +        -> setQuery("wd", "godoc skipper")
      +        -> "http://www.baidu.com";
      +google:
      +        *
      +        -> setPath("/search")
      +        -> setQuery("q", "godoc skipper")
      +        -> "https://www.google.com";
      +yandex:
      +        * && Cookie("yandex", "true")
      +        -> setPath("/search/")
      +        -> setQuery("text", "godoc skipper")
      +        -> tee("http://127.0.0.1:12345/")
      +        -> "https://yandex.ru";
      +
      +

      Predicate

      +

      A Predicate adds a matching rule to a route. +For example the Cookie predicate, Cookie("yandex", "true"), matches +if there is a cookie in the request with name “yandex” and the value +is “true”, else the route processing will go on and try to find +another matching route for the given request. Multiple predicates can +be combined by && which means a logical AND. If you need a +logical OR, you have to create another route.

      +

      Special Predicates:

      +
        +
      • * catch all is always true
      • +
      • Path() reduces the number of routes in O(log n) time to scan afterwards a subset in linear time
      • +
      • PathSubtree() reduces the number of routes O(log n) time to scan afterwards a subset in linear time
      • +
      +

      Predicate and routing table

      +

      A routing table consists of a number of routes. A route has a list of +predicates and filters. Predicates match an incoming request to a +specific, best matching, route. Each route has a set of filters.

      +

      picture of a Predicate

      +

      Filter

      +

      A filter changes a HTTP request or response or both. Multiple filters +can be concatenated by ->.

      +

      Some special filters are:

      +
        +
      • inlineContent() sets the HTTP response body, should be used with status() filter and backend
      • +
      • static() serves static files and should be used with backend
      • +
      • status() sets HTTP status code to a given value, should be used with backend
      • +
      • tee() clones request to given target
      • +
      +

      Filter in context of an HTTP request

      +

      The picture shows the transformation of the requests and responses

      +

      picture of a Filter

      +

      Backend

      +

      The last entry of a route is the backend definition, that will be +called with the result request after filter processing. Normally this +is an URL string.

      +

      Special backends:

      +
        +
      • <loopback> restart route processing with the possibly changed request
      • +
      • <shunt> stops processing, used for fast returns
      • +
      • <dynamic> target is set dynamically in a filter
      • +
      • <$algorithm, "be1", "be2", ..., "beN"> load balanced backend with N backends
      • +
      +

      See more about backends in backend references.

      +

      Dataclient

      +

      Dataclients are used to pull route information from a data source. The +data will be used to create routes according to the dataclient. As a +special case, for example kubernetes dataclient automatically adds +HTTP->HTTPS redirects if skipper is started with -kubernetes-https-redirect.

      +

      Dataclients:

      + +

      Route processing

      +

      Package skipper has a Go http.Server and does the ListenAndServe +call with the loggingHandler wrapped proxy. The loggingHandler +is basically a middleware for the proxy providing access logs and +both implement the plain Go http.Handler interface.

      +

      For each incoming http.Request the proxy will create a request +context and enhance it with an Opentracing API Span. +It will check proxy global ratelimits first and after that lookup the +route in the routing table. After that skipper will apply all request +filters, that can modify the http.Request. It will then check the +route local ratelimits, the circuitbreakers and do the backend +call. If the backend call got a TCP or TLS connection error in a +loadbalanced route, skipper will do a retry to another backend of that +loadbalanced group automatically. Just before the response to the +caller, skipper will process the response filters, that can change the +http.Response.

      +

      In two special cases, skipper doesn’t forward the request to the +backend. When the route is shunted (<shunt>), skipper serves the +request alone, by using only the filters. When the route is a +<loopback>, the request is passed to the routing table for finding +another route, based on the changes that the filters made to the +request. In case it will always find a <loopback> route it will stop +after maximum number of loopbacks is reached and logs an error.

      +

      Skipper's request and response processing

      +

      Routing mechanism

      +

      The routing executes the following steps in the typical case:

      +
        +
      1. +

        Select the best fitting route by matching the request against the + predicates. When no route is found, respond with 404 (unless the default + status code is configured to a different value).

        +
      2. +
      3. +

        Execute the filters defined in the route in normal order on the + request. The filters may or may not alter the request.

        +
      4. +
      5. +

        Forward the request to the backend defined by the route and receive + a response.

        +
      6. +
      7. +

        Execute the filters defined in the route in reverse order on the + response. The filters may or may not alter the response.

        +
      8. +
      9. +

        Respond to the incoming request with the resulting response.

        +
      10. +
      +

      Route matching

      +

      Skipper can handle a relatively large number of routes with acceptable +performance, while being able to use any attribute of the incoming HTTP +requests to distinguish between them. In order to be able to do so, the +path matching predicates (Path() and PathSubtree() but not PathRegexp()) +have a special role during route matching, which is a tradeoff by +design, and needs to be kept in mind to understand in some cases why a +certain route was matched for a request instead of another.

      +

      The route matching logic can be summed up as follows:

      +
        +
      1. +

        Lookup in the path tree based on the Path() and the PathSubtree() + predicates, using the path component of the incoming request’s URI. Then + the remaining predicates of the found route(s) are evaluated.

        +
          +
        • +

          the path lookup is a radix tree with O(log(n)) time complexity

          +
        • +
        • +

          in case of intersecting paths, the more specific path is matched in + the tree

          +
        • +
        • +

          PathRegexp() is not used in the tree, but it is evaluated only after + Path() or PathSubtree(), just like e.g. Method() or Host().

          +
        • +
        +
      2. +
      3. +

        If step #1 matches multiple routes, which means there are multiple + routes in the same position of the path tree, and all other predicates + match the request, too, then the route with the highest + weight is matched.

        +
          +
        • +

          this is an O(n) lookup, but only on the same leaf

          +
        • +
        • +

          the root of the tree is considered a single leaf, so if not using the + Path() or PathSubtree() predicates, the entire lookup will become O(n) + over all the routes.

          +
        • +
        +
      4. +
      5. +

        If #2 results in multiple matching routes, then one route will be + selected. It is unspecified which one.

        +
      6. +
      +

      See more details about the predicates here: Predicates.

      +

      Route creation

      +

      Skipper has two kinds of routes:

      +
        +
      1. eskip.Route
      2. +
      3. routing.Route
      4. +
      +

      An eskip.Route is the parsed representation of user input. This will +be converted to a routing.Route, when the routing table is built. A +tree of routing.Route will be used to match an incoming Request to a route.

      +

      Route creation steps:

      +
        +
      1. Skipper’s route creation starts with the Dataclient + to fetch routes ([]*eskip.Route).
      2. +
      3. These will be first processed by + []routing.PreProcessor. PreProcessors are able to add, remove, + modify all []*eskip.Route.
      4. +
      5. After that []*eskip.Route are converted to []*routing.Route.
      6. +
      7. []routing.PostProcessor are executed. PostProcessors are able to + add, remove, modify all []*routing.Route.
      8. +
      9. Last the active routing table is swapped. Now all incoming requests + are handled by the new routing table
      10. +
      +

      Building skipper

      +

      Local build

      +

      To get a local build of skipper for your CPU architecture, you can run +make skipper. To cross compile to non Linux platforms you can use:

      +
        +
      • make build.darwin for Mac OS X (amd64)
      • +
      • make build.windows for Windows (amd64)
      • +
      +

      The local build will write into ./bin/ directory.

      +

      CI build

      +

      The current used CI flow to build the official docker container, you +can see in delivery.yaml. +Official release versions you will find at +registry.opensource.zalan.do/teapot/skipper:${RELEASE_VERSION}, +where ${RELEASE_VERSION} is the git tag got by $(git describe --tags --always --dirty).

      +

      Test versions are released at +registry.opensource.zalan.do/teapot/skipper-test:${CDP_BUILD_VERSION} +for every pull request, limited to only repository members, because of +compliance and security reasons.

      +

      Testing routes

      +

      To test routes you can use a local build of skipper and pass arguments +-inline-routes=<route string> or for more complex ones +use a local eskip file on disk and use -routes-file=<filepath>.

      +

      Example:

      +
      ./bin/skipper -address :9999 -inline-routes 'r: * -> setQuery("lang", "pt") -> "http://127.0.0.1:8080/"'
      +
      +

      Now you have a proxy running that will set a query to your request URL +and call http://127.0.0.1:8080/?lang=pt

      +

      The simplest way of testing a proxy is using a local backend and a +local browser.

      +

      Local backend example:

      +
      ./bin/skipper -address :8080 -inline-routes 'r: * -> inlineContent("Hello world!") -> status(200) -> <shunt>'
      +
      +

      If you want to do the request and see the response in detail, you can +use curl as a browser, which should be installed on most Linux and +Mac OS X computers.

      +

      Example client call to our defined proxy:

      +
      % curl localhost:8080 -v
      +* Rebuilt URL to: localhost:8080/
      +*   Trying ::1...
      +* Connected to localhost (::1) port 8080 (#0)
      +> GET / HTTP/1.1
      +> Host: localhost:8080
      +> User-Agent: curl/7.49.0
      +> Accept: */*
      +>
      +< HTTP/1.1 200 OK
      +< Content-Length: 12
      +< Content-Type: text/plain; charset=utf-8
      +< Server: Skipper
      +< Date: Thu, 01 Nov 2018 15:54:13 GMT
      +<
      +* Connection #0 to host localhost left intact
      +Hello world!
      +
      +

      YAML Configuration

      +

      The usage of flags to configure the skipper binary can get quickly out +of hand. You can use a yaml file instead to populate the flags presented +in the skipper -help command.

      +
      kubernetes: true
      +kubernetes-in-cluster: true
      +kubernetes-https-redirect: true
      +proxy-preserve-host: true
      +serve-host-metrics: true
      +address: ":8080"
      +enable-ratelimits: true
      +experimental-upgrade: true
      +metrics-exp-decay-sample: true
      +lb-healthcheck-interval: "3s"
      +metrics-flavour: ["codahale","prometheus"]
      +enable-connection-metrics: true
      +whitelisted-healthcheck-cidr: "172.20.0.0/16"
      +ignore-trailing-slash: true
      +inline-routes: 'r: * -> inlineContent("Hello world!") -> status(200) -> <shunt>'
      +
      +

      Considering that this file would be named config.yaml you can use it to populate +the flags using the config-file flag:

      +
      ./bin/skipper -config-file=config.yaml
      +
      +

      Performing the same call to the address as exemplified in the previous section should +yield the same results.

      +

      Current routing table

      +

      To investigate the current routing table skipper has loaded into its +memory, you can use the -support-listener, which defaults to port +9911 and you have to do a GET request to the /routes endpoint.

      +

      Example:

      +
      % curl localhost:9911/routes
      +r: *
      +  -> setQuery("lang", "pt")
      +  -> "http://127.0.0.1:8000";
      +
      +

      If you do not see your route, then most probably you have a syntax +error in your route definition, such that the route was not loaded +into memory.

      +

      To print the number of routes, X-Count header, and the last update +timestamp, X-Timestamp header, you can use a HEAD request to the +support listener /routes endpoint:

      +
      % curl -I localhost:9911/routes
      +HTTP/1.1 200 OK
      +Content-Type: text/plain
      +X-Count: 1
      +X-Timestamp: 1541086036
      +Date: Fri, 02 Nov 2018 00:30:43 GMT
      +
      +

      For skipper operators the number of routes can be interesting for +statistics, and the timestamp can be used to detect skipper instances that have not +updated their routing tables.

      +

      If there are more than 1024 routes in use, then paging the results is +possible with the offset and limit query parameters:

      +
      curl localhost:9911/routes?offset=2048&limit=512
      +
      +

      Route IDs

      +

      In the following example rid is the route ID:

      +
      % curl localhost:9911/routes
      +rid: *
      +  -> setQuery("lang", "pt")
      +  -> "http://127.0.0.1:8000";
      +
      +

      If the route ID has a prefix kube_, then it is a route created by +the Kubernetes dataclient. We do not disallow manually creating +routes with the kube_ prefix, but most of the time you should not use it +in other routes, so that routes created by the Kubernetes dataclient can be +differentiated from those created by other +dataclients, in case you use multiple at the same time.

      + + + + + + + + + + + + + +
      +
      + + + +
      + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorials/built-your-own/index.html b/tutorials/built-your-own/index.html new file mode 100644 index 0000000000..d577f2396d --- /dev/null +++ b/tutorials/built-your-own/index.html @@ -0,0 +1,1429 @@ + + + + + + + + + + + + + + + + + + + + + + + Built Your Own Proxy - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + +
      + +
      + + + + + + +
      +
      + + + +
      +
      +
      + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + + + + +

      Built your own skipper proxy

      +

      One of the biggest advantages of skipper compared to other HTTP +proxies is that skipper has a library-first design. This means that it +is common to build your custom proxy based on skipper.

      +

      A minimal example project is +skipper-example-proxy.

      +
      /*
      +This command provides an executable version of skipper with the default
      +set of filters.
      +
      +For the list of command line options, run:
      +
      +    skipper -help
      +
      +For details about the usage and extensibility of skipper, please see the
      +documentation of the root skipper package.
      +
      +To see which built-in filters are available, see the skipper/filters
      +package documentation.
      +*/
      +package main
      +
      +import (
      +    log "github.com/sirupsen/logrus"
      +    lfilters "github.com/szuecs/skipper-example-proxy/filters"
      +    "github.com/zalando/skipper"
      +    "github.com/zalando/skipper/config"
      +)
      +
      +func main() {
      +    cfg := config.NewConfig()
      +    if err := cfg.Parse(); err != nil {
      +        log.Fatalf("Error processing config: %s", err)
      +    }
      +
      +    log.SetLevel(cfg.ApplicationLogLevel)
      +
      +    opt := cfg.ToOptions()
      +    opt.CustomFilters = append(opt.CustomFilters, lfilters.NewMyFilter())
      +
      +    log.Fatal(skipper.Run(opt))
      +}
      +
      +

      Code

      +

      Write the code and use the custom filter implemented in https://github.com/szuecs/skipper-example-proxy/blob/main/filters/custom.go +

      [:~]% mkdir -p /tmp/go/skipper
      +[:~]% cd /tmp/go/skipper
      +[:/tmp/go/skipper]% go mod init myproject
      +go: creating new go.mod: module myproject
      +[:/tmp/go/skipper]% cat >main.go
      +package main
      +
      +import (
      +    log "github.com/sirupsen/logrus"
      +    lfilters "github.com/szuecs/skipper-example-proxy/filters"
      +    "github.com/zalando/skipper"
      +    "github.com/zalando/skipper/config"
      +)
      +
      +func main() {
      +    cfg := config.NewConfig()
      +    if err := cfg.Parse(); err != nil {
      +        log.Fatalf("Error processing config: %s", err)
      +    }
      +
      +    log.SetLevel(cfg.ApplicationLogLevel)
      +
      +    opt := cfg.ToOptions()
      +    opt.CustomFilters = append(opt.CustomFilters, lfilters.NewMyFilter())
      +
      +    log.Fatal(skipper.Run(opt))
      +}
      +CTRL-D
      +[:/tmp/go/skipper]%
      +

      +

      Build

      +

      Fetch dependencies and build your skipper binary. +

      [:/tmp/go/skipper]% go mod tidy
      +go: finding module for package github.com/zalando/skipper/config
      +go: finding module for package github.com/szuecs/skipper-example-proxy/filters
      +go: finding module for package github.com/sirupsen/logrus
      +go: finding module for package github.com/zalando/skipper
      +go: found github.com/sirupsen/logrus in github.com/sirupsen/logrus v1.9.3
      +go: found github.com/szuecs/skipper-example-proxy/filters in github.com/szuecs/skipper-example-proxy v0.0.0-20230622190245-63163cbaabc8
      +go: found github.com/zalando/skipper in github.com/zalando/skipper v0.16.117
      +go: found github.com/zalando/skipper/config in github.com/zalando/skipper v0.16.117
      +go: finding module for package github.com/nxadm/tail
      +go: finding module for package github.com/kr/text
      +go: finding module for package github.com/rogpeppe/go-internal/fmtsort
      +go: found github.com/kr/text in github.com/kr/text v0.2.0
      +go: found github.com/rogpeppe/go-internal/fmtsort in github.com/rogpeppe/go-internal v1.10.0
      +...
      +
      +[:/tmp/go/skipper]% go build -o skipper .
      +[:/tmp/go/skipper]%
      +

      +

      Test

      +

      We start the proxy

      +
      # start the proxy
      +[:/tmp/go/skipper]% ./skipper -inline-routes='* -> myFilter() -> status(250) -> <shunt>'
      +[APP]INFO[0000] Expose metrics in codahale format
      +[APP]INFO[0000] enable swarm: false
      +[APP]INFO[0000] Replacing tee filter specification
      +[APP]INFO[0000] Replacing teenf filter specification
      +[APP]INFO[0000] Replacing lua filter specification
      +[APP]INFO[0000] support listener on :9911
      +[APP]INFO[0000] Dataclients are updated once, first load complete
      +[APP]INFO[0000] proxy listener on :9090
      +[APP]INFO[0000] TLS settings not found, defaulting to HTTP
      +[APP]INFO[0000] route settings, reset, route: : * -> myFilter() -> status(250) -> <shunt>
      +[APP]INFO[0000] route settings received
      +[APP]INFO[0000] route settings applied
      +127.0.0.1 - - [22/Jun/2023:21:13:46 +0200] "GET /foo HTTP/1.1" 250 0 "-" "curl/7.49.0" 0 127.0.0.1:9090 - -
      +
      +

      Then we start the client to call the proxy endpoint. +

      # client
      +% curl -v http://127.0.0.1:9090/foo
      +*   Trying 127.0.0.1...
      +* Connected to 127.0.0.1 (127.0.0.1) port 9090 (#0)
      +> GET /foo HTTP/1.1
      +> Host: 127.0.0.1:9090
      +> User-Agent: curl/7.49.0
      +> Accept: */*
      +>
      +< HTTP/1.1 250 status code 250   <-- skipper core filter status(250)
      +< My-Filter: response            <-- your custom filter myFilter()
      +< Server: Skipper
      +< Date: Thu, 22 Jun 2023 19:13:46 GMT
      +< Transfer-Encoding: chunked
      +<
      +* Connection #0 to host 127.0.0.1 left intact
      +

      + + + + + + + + + + + + + +
      +
      + + + +
      + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorials/common-use-cases/index.html b/tutorials/common-use-cases/index.html new file mode 100644 index 0000000000..a2caa0d58e --- /dev/null +++ b/tutorials/common-use-cases/index.html @@ -0,0 +1,1437 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Common Use Cases - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + +
      + +
      + + + + + + +
      +
      + + + +
      +
      +
      + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + + + + +

      Common Use Cases

      + +

      Common Use Cases

      +

      To understand common use cases, we assume you read the basics.

      +

      Redirect handling

      +

      If you want to do a redirect from a route, you can use the +redirectTo() filter in combination with the <shunt> backend. +If you do not specify a path in your redirect, then the path from the +client will be passed further and not modified by the redirect.

      +

      Example:

      +
      % ./bin/skipper -address :8080 -inline-routes 'r: * -> redirectTo(308, "http://127.0.0.1:9999") -> <shunt>'
      +::1 - - [01/Nov/2018:18:42:02 +0100] "GET / HTTP/1.1" 308 0 "-" "curl/7.49.0" 0 localhost:8080 - -
      +::1 - - [01/Nov/2018:18:42:08 +0100] "GET /foo HTTP/1.1" 308 0 "-" "curl/7.49.0" 0 localhost:8080 - -
      +
      +% curl localhost:8080 -v
      +* Rebuilt URL to: localhost:8080/
      +*   Trying ::1...
      +* Connected to localhost (::1) port 8080 (#0)
      +> GET / HTTP/1.1
      +> Host: localhost:8080
      +> User-Agent: curl/7.49.0
      +> Accept: */*
      +>
      +< HTTP/1.1 308 Permanent Redirect
      +< Location: http://127.0.0.1:9999/
      +< Server: Skipper
      +< Date: Thu, 01 Nov 2018 17:42:18 GMT
      +< Content-Length: 0
      +<
      +* Connection #0 to host localhost left intact
      +
      +% curl localhost:8080/foo -v
      +*   Trying ::1...
      +* Connected to localhost (::1) port 8080 (#0)
      +> GET /foo HTTP/1.1
      +> Host: localhost:8080
      +> User-Agent: curl/7.49.0
      +> Accept: */*
      +>
      +< HTTP/1.1 308 Permanent Redirect
      +< Location: http://127.0.0.1:9999/foo
      +< Server: Skipper
      +< Date: Thu, 01 Nov 2018 17:42:14 GMT
      +< Content-Length: 0
      +<
      +* Connection #0 to host localhost left intact
      +
      +

      set absolute path

      +

      If you set a path, in this example /, in your redirect definition, then the path is set to +the chosen value. The Location header is set in the response to /, +but the client sent /foo.

      +
      % ./bin/skipper -address :8080 -inline-routes 'r: * -> redirectTo(308, "http://127.0.0.1:9999/") -> <shunt>'
      +
      +% curl localhost:8080/foo -v
      +*   Trying ::1...
      +* Connected to localhost (::1) port 8080 (#0)
      +> GET /foo HTTP/1.1
      +> Host: localhost:8080
      +> User-Agent: curl/7.49.0
      +> Accept: */*
      +>
      +< HTTP/1.1 308 Permanent Redirect
      +< Location: http://127.0.0.1:9999/
      +< Server: Skipper
      +< Date: Thu, 01 Nov 2018 17:47:17 GMT
      +< Content-Length: 0
      +<
      +* Connection #0 to host localhost left intact
      +
      +

      change base path

      +

      If you want a redirect definition that adds a base path and the +specified path by the client should be appended to this base path you +can use the modPath filter just before the redirectTo() to modify +the base path as you like.

      +

      Route Example shows, that calls to /a/base/foo/bar would be +redirected to https://another-example.com/my/new/base/foo/bar:

      +
      redirect: Path("/a/base/")
      +          -> modPath("/a/base/", "/my/new/base/")
      +          -> redirectTo(308, "https://another-example.com")
      +          -> <shunt>'
      +
      +

      The next example shows how to test a redirect with changed base path +on your computer:

      +
      % ./bin/skipper -address :8080 -inline-routes 'r: * -> modPath("/", "/my/new/base/") -> redirectTo(308, "http://127.0.0.1:9999") -> <shunt>'
      +::1 - - [01/Nov/2018:18:49:45 +0100] "GET /foo HTTP/1.1" 308 0 "-" "curl/7.49.0" 0 localhost:8080 - -
      +
      +% curl localhost:8080/foo -v
      +*   Trying ::1...
      +* Connected to localhost (::1) port 8080 (#0)
      +> GET /foo HTTP/1.1
      +> Host: localhost:8080
      +> User-Agent: curl/7.49.0
      +> Accept: */*
      +>
      +< HTTP/1.1 308 Permanent Redirect
      +< Location: http://127.0.0.1:9999/my/new/base/foo
      +< Server: Skipper
      +< Date: Thu, 01 Nov 2018 17:49:45 GMT
      +< Content-Length: 0
      +<
      +* Connection #0 to host localhost left intact
      +
      + + + + + + + + + + + + + +
      +
      + + + +
      + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorials/development/index.html b/tutorials/development/index.html new file mode 100644 index 0000000000..1cbb06692e --- /dev/null +++ b/tutorials/development/index.html @@ -0,0 +1,1757 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Development - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + +
      + +
      + + + + + + +
      +
      + + + +
      +
      +
      + + + + + +
      +
      +
      + + + + + + + +
      +
      + + + + + + + +

      Development

      + +

      Local Setup

      +

      Build Skipper Binary

      +

      Clone repository and compile with Go.

      +
      git clone https://github.com/zalando/skipper.git
      +cd skipper
      +make skipper
      +
      +

      binary will be ./bin/skipper

      +

      Run Skipper as Proxy with 2 backends

      +

      As a small example, we show how you can run one proxy skipper and 2 +backend skippers.

      +

      Start the proxy that listens on port 9999 and serves all requests with a single route, that +proxies to two backends using the round robin algorithm: +

      ./bin/skipper -inline-routes='r1: * -> <roundRobin, "http://127.0.0.1:9001", "http://127.0.0.1:9002">' --address :9999
      +

      +

      Start two backends, with similar routes, one responds with “1” and the +other with “2” in the HTTP response body: +

      ./bin/skipper -inline-routes='r1: * -> inlineContent("1") -> <shunt>' --address :9001 &
      +./bin/skipper -inline-routes='r1: * -> inlineContent("2") -> <shunt>' --address :9002
      +

      +

      Test the proxy with curl as a client: +

      curl -s http://localhost:9999/foo
      +1
      +curl -s http://localhost:9999/foo
      +2
      +curl -s http://localhost:9999/foo
      +1
      +curl -s http://localhost:9999/foo
      +2
      +

      +

      Debugging Skipper

      +

      It can be helpful to run Skipper in a debug session locally that enables one to inspect variables and do other debugging activities in order to analyze filter and token states.

      +

      For Visual Studio Code users, a simple setup could be to create the following launch configuration that compiles Skipper, runs it in a Delve debug session, and then opens the default web browser creating the request. By setting a breakpoint, you can inspect the state of the filter or application. This setup is especially useful when inspecting OAuth flows and tokens, as it allows stepping through the states.

      +
      +Example `.vscode/launch.json` file + +
      {
      +    "version": "0.2.0",
      +    "configurations": [
      +        {
      +            "name": "Launch Package",
      +            "type": "go",
      +            "request": "launch",
      +            "mode": "debug",
      +            "program": "${workspaceFolder}/cmd/skipper/main.go",
      +            "args": [
      +                "-application-log-level=debug",
      +                "-address=:9999",
      +                "-inline-routes=PathSubtree(\"/\") -> inlineContent(\"Hello World\") -> <shunt>",
      +               // example OIDC setup, using https://developer.microsoft.com/en-us/microsoft-365/dev-program
      +               //  "-oidc-secrets-file=${workspaceFolder}/.vscode/launch.json",
      +               //  "-inline-routes=* -> oauthOidcAnyClaims(\"https://login.microsoftonline.com/<tenant Id>/v2.0\",\"<application id>\",\"<client secret>\",\"http://localhost:9999/authcallback\", \"profile\", \"\", \"\", \"x-auth-email:claims.email x-groups:claims.groups\") -> inlineContent(\"restricted access\") -> <shunt>",
      +            ],
      +            "serverReadyAction": {
      +                "pattern": "route settings applied",
      +                "uriFormat": "http://localhost:9999",
      +                "action": "openExternally"
      +            }
      +        }
      +    ]
      +}
      +
      + +
      + +

      Docs

      +

      We keep user documentation and developer documentation separate. +In docs/ you find the user documentation in mkdocs format, +rendered at https://opensource.zalando.com/skipper, which is updated automatically with each docs/ change merged to the master branch. +For developer documentation for users of skipper as a library, +the godoc format is used, rendered at https://godoc.org/github.com/zalando/skipper.

      +

      User documentation

      +

      To see rendered documentation locally run mkdocs serve and navigate to http://127.0.0.1:8000.

      +

      Filters

      +

      Filters allow changing arbitrary HTTP data in the Request or +Response. If you need to read and write the http.Body, please make +sure you discuss the use case before creating a pull request.

      +

      A filter consists of at least two types: a filters.Spec and a filters.Filter. +Spec consists of everything that is needed and known before a user +will instantiate a filter.

      +

      A spec will be created in the bootstrap procedure of a skipper +process. A spec has to satisfy the filters.Spec interface Name() string and +CreateFilter([]interface{}) (filters.Filter, error).

      +

      The actual filter implementation has to satisfy the filter.Filter +interface Request(filters.FilterContext) and Response(filters.FilterContext).

      +

      The simplest filter possible is, if filters.Spec and +filters.Filter are the same type:

      +
      type myFilter struct{}
      +
      +func NewMyFilter() *myFilter {
      +    return &myFilter{}
      +}
      +
      +func (spec *myFilter) Name() string { return "myFilter" }
      +
      +func (spec *myFilter) CreateFilter(config []interface{}) (filters.Filter, error) {
      +     return NewMyFilter(), nil
      +}
      +
      +func (f *myFilter) Request(ctx filters.FilterContext) {
      +     // change data in ctx.Request() for example
      +}
      +
      +func (f *myFilter) Response(ctx filters.FilterContext) {
      +     // change data in ctx.Response() for example
      +}
      +
      +

      Find a detailed example at how to develop a filter.

      +

      Filters with cleanup

      +

      Sometimes your filter needs to cleanup resources on shutdown. In Go +functions that do this have often the name Close(). +There is the filters.FilterCloser interface that if you comply with +it, the routing.Route will make sure your filters are closed in case +of routing.Routing was closed.

      +
      type myFilter struct{}
      +
      +func NewMyFilter() *myFilter {
      +    return &myFilter{}
      +}
      +
      +func (spec *myFilter) Name() string { return "myFilter" }
      +
      +func (spec *myFilter) CreateFilter(config []interface{}) (filters.Filter, error) {
      +     return NewMyFilter(), nil
      +}
      +
      +func (f *myFilter) Request(ctx filters.FilterContext) {
      +     // change data in ctx.Request() for example
      +}
      +
      +func (f *myFilter) Response(ctx filters.FilterContext) {
      +     // change data in ctx.Response() for example
      +}
      +
      +func (f *myFilter) Close() error {
      +     // cleanup your filter
      +}
      +
      +

      Filters with error handling

      +

      Sometimes you want to have a filter that wants to get called +Response() even if the proxy will not send a response from the +backend, for example you want to count error status codes, like +the admissionControl +filter. +In this case you need to comply with the following proxy interface:

      +
      // errorHandlerFilter is an opt-in for filters to get called
      +// Response(ctx) in case of errors.
      +type errorHandlerFilter interface {
      +    // HandleErrorResponse returns true in case a filter wants to get called
      +    HandleErrorResponse() bool
      +}
      +
      +

      Example: +

      type myFilter struct{}
      +
      +func NewMyFilter() *myFilter {
      +    return &myFilter{}
      +}
      +
      +func (spec *myFilter) Name() string { return "myFilter" }
      +
      +func (spec *myFilter) CreateFilter(config []interface{}) (filters.Filter, error) {
      +     return NewMyFilter(), nil
      +}
      +
      +func (f *myFilter) Request(ctx filters.FilterContext) {
      +     // change data in ctx.Request() for example
      +}
      +
      +func (f *myFilter) Response(ctx filters.FilterContext) {
      +     // change data in ctx.Response() for example
      +}
      +
      +func (f *myFilter) HandleErrorResponse() bool {
      +     return true
      +}
      +

      +

      Predicates

      +

      Predicates allow matching a condition that can be based on arbitrary +HTTP data in the Request. There are also predicates that use chance, +Traffic(), or the current local time, for example After(), to match +a request and do not use the HTTP data at all.

      +

      A predicate consists of at least two types: routing.Predicate +and routing.PredicateSpec, which are both interfaces.

      +

      A spec will be created in the bootstrap procedure of a skipper +process. A spec has to satisfy the routing.PredicateSpec interface +Name() string and Create([]interface{}) (routing.Predicate, error).

      +

      The actual predicate implementation has to satisfy the +routing.Predicate interface Match(*http.Request) bool and returns +true if the predicate matches the request. If false is returned, the +routing table will be searched for another route that might match the +given request.

      +

      The simplest possible predicate implementation is, if routing.PredicateSpec and +routing.Predicate are the same type:

      +
      type myPredicate struct{}
      +
      +func NewMyPredicate() routing.PredicateSpec {
      +    return &myPredicate{}
      +}
      +
      +func (spec *myPredicate) Name() string { return "myPredicate" }
      +
      +func (spec *myPredicate) Create(config []interface{}) (routing.Predicate, error) {
      +     return NewMyPredicate(), nil
      +}
      +
      +func (f *myPredicate) Match(r *http.Request) bool {
      +     // match data in *http.Request for example
      +     return true
      +}
      +
      +

      Predicates are quite similar to implement as Filters, so for a more +complete example, find an example how to develop a filter.

      +

      Dataclients

      +

      Dataclients are the way to integrate new route +sources. Dataclients pull information from a source and create routes +for skipper’s routing table.

      +

      You have to implement routing.DataClient, which is an interface that defines +function signatures LoadAll() ([]*eskip.Route, error) and +LoadUpdate() ([]*eskip.Route, []string, error).

      +

      The LoadUpdate() method can be implemented either in a way that +returns immediately, or blocks until there is a change. The routing +package will regularly call the LoadUpdate() method with a small +delay between the calls.

      +

      A complete example is the routestring implementation, which fits in +less than 50 lines of code.

      +

      Opentracing

      +

      Your custom Opentracing implementations need to satisfy the opentracing.Tracer interface from +https://github.com/opentracing/opentracing-go and need to be loaded as +a plugin, which might change in the future. +Please check the tracing package +and ask for further guidance in our community channels.

      +

      Core

      +

      Non trivial changes, proposals and enhancements to the core of skipper +should be discussed first in a Github issue, such that we can think +about how this fits best in the project and how to achieve the most +useful result. Feel also free to reach out to our community +channels and discuss +there your idea.

      +

      Every change in core has to include tests and should be a non-breaking +change. We have been planning a breaking change for a long time, but +we should coordinate to make it as good as possible for all users of skipper as +a library. Most often a breaking change can be postponed: a feature can be +added independently, and the old feature can be +deprecated and deleted later. Use of deprecated features should be shown +in logs with a log.Warning.

      + + + + + + + + + + + + + +
      +
      + + + +
      + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorials/operations/index.html b/tutorials/operations/index.html new file mode 100644 index 0000000000..45bfb2ad06 --- /dev/null +++ b/tutorials/operations/index.html @@ -0,0 +1,1724 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Operations - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + +
      + +
      + + + + + + +
      +
      + + + +
      +
      +
      + + + + + +
      +
      +
      + + + + + + + +
      +
      + + + + + + + +

      Operations

      + +

      Kubernetes

      +

      In the beginning we chose to run Skipper as +daemonset +to run it on all worker nodes. Since 2018 we run Skipper as deployment +with an hpa, horizontal Pod autoscaler, +to scale Skipper by CPU usage. +All our clusters are using AWS autoscaling groups (ASG), to increase +and decrease the number of running nodes in a cluster based on use.

      +

      In both deployment styles we run Skipper with hostnetwork: true and +point the loadbalancer in front of it to the skipper port of all +worker nodes. In our case we run an AWS Application loadbalancer (ALB) +in front, and we terminate TLS on the ALB. A health check from the ALB +detects, if Skipper is running on a worker node or not.

      +

      First steps

      +

      The next part will show you how to run Skipper with a minimal feature +set, that supports already most of the features.

      +

      A minimal set of arguments that should be chosen to support most +Kubernetes use cases:

      +
                - "skipper"
      +          - "-kubernetes"
      +          - "-kubernetes-in-cluster"
      +          - "-kubernetes-path-mode=path-prefix"
      +          - "-address=:9999"
      +          - "-wait-first-route-load"
      +          - "-proxy-preserve-host"
      +          - "-enable-ratelimits"
      +          - "-experimental-upgrade"
      +          - "-lb-healthcheck-interval=3s"
      +          - "-metrics-flavour=prometheus"
      +          - "-metrics-exp-decay-sample"
      +          - "-serve-host-metrics"
      +          - "-disable-metrics-compat"
      +          - "-enable-connection-metrics"
      +          - "-histogram-metric-buckets=.0001,.00025,.0005,.00075,.001,.0025,.005,.0075,.01,.025,.05,.075,.1,.2,.3,.4,.5,.75,1,2,3,4,5,7,10,15,20,30,60,120,300,600"
      +          - "-max-audit-body=0"
      +          - "-idle-timeout-server=62s"
      +
      +

      Skipper started with these options will support instance based +ratelimits, a wide range of Prometheus metrics, websockets and a +better HTTP path routing than the default Kubernetes Ingress spec +supports.

      +

      The former Kubernetes Ingress v1beta1 spec defined a path +as regular expression, which is not what most people would expect, nor +want. Skipper defaults in Kubernetes to use the PathRegexp predicate +for routing, because of the spec. We believe the better default is the +path prefix mode, that uses PathSubtree predicate, +instead. Path prefix search is much more scalable and can not lead to +unexpected results by not so experienced regular expressions users. +Since Kubernetes v1.18, Ingress v1 path definition +supports all path matching modes that are common in skipper:

      +
        +
      • pathType: Exact maps to Path()
      • +
      • pathType: Prefix maps to PathSubtree()
      • +
      • pathType: ImplementationSpecific is defined as you set path prefix mode.
      • +
      +

      More information about Metrics, including formats and example +Prometheus queries, can be found in the metrics +section. +The settings shown above support system and application metrics to +carefully monitor Skipper and your backend applications. Backend +application metrics get error rates and latency buckets based on host +headers. The chosen options are a good setup to safely run all +workloads from small to high traffic.

      +

      The option -max-audit-body=0, won’t log the HTTP body, if you would +do audit logging, to have a safe default.

      +

      The last option -idle-timeout-server=62s was chosen, because of a +known issue, if you +run in a multi layer loadbalancer, with ALBs in front of Skipper. +ALBs idle connection timeout is 60s +and AWS support told us to run the backends with a bigger timeout, +than the ALB in front.

      +

      Opt-In more features

      +

      Reverse Source Predicate

      +

      Depending on the HTTP loadbalancer in front of your Skippers, you might +want to set -reverse-source-predicate. This setting reverses the +lookup of the client IP to find it in the X-Forwarded-For header +values. If you do not care about +clientRatelimits +based on X-Forwarded-For headers, you can also ignore this.

      +

      Cluster Ratelimit

      +

      Ratelimits can be calculated for the whole cluster instead of having +only the instance based ratelimits. The common term we use in skipper +documentation is cluster ratelimit. +There are two options, but we highly recommend the use of Redis based +cluster ratelimits. To support redis based cluster ratelimits you have to +use -enable-swarm and add a list of URLs to redis +-swarm-redis-urls=skipper-ingress-redis-0.skipper-ingress-redis.kube-system.svc.cluster.local:6379,skipper-ingress-redis-1.skipper-ingress-redis.kube-system.svc.cluster.local:6379. We +run redis as +statefulset +with a headless +service +to have predictable names. We chose to not use a persistent volume, +because storing the data in memory is good enough for this use case.

      +

      East West

      +
      +

      Attention

      +

      This feature is deprecated. Consider using EastWest +Range.

      +
      +

      Skipper supports cluster internal service-to-service communication as +part of running as an API Gateway with an East-West +setup. +You have to add -enable-kubernetes-east-west and optionally choose a +domain +-kubernetes-east-west-domain=.ingress.cluster.local. Be warned: There is a +known bug, if you +combine it with custom routes. You might want to consider EastWest +Range.

      +

      East West Range

      +

      Alternatively, you can use Kubernetes East West Range feature. Use the +flag -kubernetes-east-west-range-domains to define the cluster +internal domains and -kubernetes-east-west-range-predicates to define the +predicates that will be appended to every +route identified as an internal domain. Differently from the +-enable-kubernetes-east-west and the +-kubernetes-east-west-domain=.ingress.cluster.local flags (check +East West) this feature +will not automatically create routes for you and both features shouldn’t +be used in combination. The ingress and/or route groups resources must +opt-in for east west range routes, explicitly defining them. For example, +given that Skipper was initialized with the following east-west range flags:

      +
      skipper \
      +  -kubernetes-east-west-range-domains="ingress.cluster.local" \
      +  -kubernetes-east-west-range-predicates='ClientIP("10.2.0.0/16")'
      +
      +

      and the following ingress is defined:

      +
      apiVersion: networking.k8s.io/v1
      +kind: Ingress
      +metadata:
      +  name: qux
      +  namespace: foo
      +spec:
      +  rules:
      +  - host: example.ingress.cluster.local
      +    http:
      +      paths:
      +      - path: "/"
      +        pathType: Prefix
      +        backend:
      +          service:
      +            name: qux
      +            port:
      +              name: baz
      +
      +

      Skipper will secure this route adding the predicate ClientIP("10.2.0.0/16").

      +

      The same ingress might be used for internal and external hostnames. For +example, given a slightly modified version of the ingress:

      +
      apiVersion: networking.k8s.io/v1
      +kind: Ingress
      +metadata:
      +...
      +spec:
      +  rules:
      +  - host: example.ingress.cluster.local
      +    http: ...
      +  - host: example.mydomain.org
      +    http: ...
      +
      +

      will make the service accessible through example.ingress.cluster.local +and example.mydomain.org, but the first hostname will only accept +connections from the network 10.2.0.0/16, on this specific scenario.

      +

      You can specify multiple east-west range domains and predicates:

      +
      skipper \
      +  -kubernetes-east-west-range-domains="ingress.cluster.local,another.cluster.local" \
      +  -kubernetes-east-west-range-predicates='ClientIP("10.2.0.0/16") && SourceLastFrom("10.2.0.0/16")'
      +
      +

      API monitoring and Auth

      +

      As part of API Gateway features, skipper supports API +monitoring +and common authentication and authorization +protocols in Microservices architectures.

      +

      OpenTracing

      +

      Skipper has support for different OpenTracing API vendors, including +jaeger, +lightstep and +instana. +For example to configure the lightstep opentracing plugin, with a +searchable component and cluster tag you can use: +- "-opentracing=lightstep component-name=skipper-ingress token=$(LIGHTSTEP_TOKEN) collector=tracing-collector.endpoint:8444 cmd-line=skipper-ingress max-buffered-spans=4096 tag=cluster=mycluster". +The LIGHTSTEP_TOKEN is passed as environment variable to +the process.

      +

      Global default filters

      +

      Skipper can also add global default filters, +which will be automatically added to all routes. For example you can +use -default-filters-prepend="enableAccessLog(4,5)" to enable only +access logs in case of HTTP codes 4xx or 5xx. In the specific case of +*AccessLog filters and -default-filters-prepend, the default +choice can be overridden by users via zalando.org/skipper-filter +ingress annotation.

      +

      Production example

      +

      A full production deployment example you find at Zalando’s configuration +repository.

      +

      Recommendations

      +

      We recommend to run a loadbalancer in front of Skipper to terminate +TLS, such that cluster users can not access your keys and +certificates. While skipper supports SNI, hardware and cloud +loadbalancers often have hardware support to terminate TLS. It’s +cheaper for you to offload TLS to these devices and trust your compute +vendor.

      +

      We recommend to start simple and grow the feature set from there. +Check features, that are used in >60 production clusters in Zalando’s configuration +repository.

      +

      Dashboards

      +

      As an operator, build a Skipper dashboard and learn how Skipper and +the Go runtime behaves with your workload. We successfully ran several +load tests from 0 to 25k requests per seconds. The load test was +ramping up in less than a minute with initially 3 Skipper Pods, with an +HPA that has CPU target value of 100%.

      +

      Operations dashboard: +skipper operations dashboard 1 +skipper operations dashboard 2

      +

      Application metrics dashboard: +skipper backends dashboard

      + + + + + + + + + + + + + +
      +
      + + + +
      + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorials/ratelimit/index.html b/tutorials/ratelimit/index.html new file mode 100644 index 0000000000..0c368a8150 --- /dev/null +++ b/tutorials/ratelimit/index.html @@ -0,0 +1,1734 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Ratelimits - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + +
      + +
      + + + + + + +
      +
      + + + +
      +
      +
      + + + + + +
      +
      +
      + + + + + + + +
      +
      + + + + + + + +

      Ratelimits

      + +

      Overview

      +

      Ratelimits are calculated for a number of requests and a +time.Duration for a given bucket. To enable rate limits you need to +run skipper with -enable-ratelimits.

      +

      A time.Duration is specified as string and can for example be “10s” +for ten seconds, “5m” for five minutes or “2h” for two hours.

      +

      As bucket skipper can use either the backend or some client +information.

      +

      In case of a backend ratelimit the bucket is only one global for one +route.

      +

      In case of a client ratelimit the buckets are created by the +used ratelimit.Lookuper, which defaults to the X-Forwarded-For +header, but can be also the Authorization header. So for the client +ratelimit with X-Forwarded-For header, the client IP that the first +proxy in the list sees will be used to lookup the bucket to count +requests.

      +

      Instance local Ratelimit

      +

      Filters ratelimit() and clientRatelimit() calculate the ratelimit +in a local view having no information about other skipper instances.

      +

      Backend Ratelimit

      +

      The backend ratelimit filter is ratelimit() and it is the simplest +one. You can define how many requests a route allows for a given +time.Duration to send to all backends of the route. This means that you +can not limit traffic to a single backend instance.

      +

      For example to limit the route to 10 requests per minute for each +skipper instance, you can specify:

      +
      ratelimit(10, "1m")
      +
      +

      Client Ratelimit

      +

      The client ratelimit filter is clientRatelimit() and it uses +information from the request to find the bucket which will get the +increased request count.

      +

      For example to limit the route to 10 requests per minute for each +skipper instance for the same client selected by the X-Forwarded-For +header, you can specify:

      +
      clientRatelimit(10, "1m")
      +
      +

      There is an optional third argument that selects the same client by HTTP +header value. As an example for Authorization Header you would use:

      +
      clientRatelimit(10, "1m", "Authorization")
      +
      +

      The optional third argument can create an AND combined Header +ratelimit. The header names must be separated by ,. For example all of the +specified headers have to be the same to recognize them as the same +client:

      +
      clientRatelimit(10, "1m", "X-Forwarded-For,Authorization,X-Foo")
      +
      +

      Internally skipper has a clean interval to clean up old buckets to reduce +the memory footprint in the long run.

      +

      Security Consideration

      +

      ClientRatelimit works on data provided by the client. In theory an +attacker likely can work around all of your configurations. On the +other hand there is always a pattern in attacks, and you are more +likely being able to find the pattern and mitigate the attack, if you +have a powerful tool like the provided clientRatelimit.

      +

      Cluster Ratelimit

      +

      A cluster ratelimit computes all requests for all skipper peers. This +requires that you run skipper with -enable-swarm and select one of +the two implementations:

      + +

      Make sure all requirements that depend on the implementation +and your dataclient in use are satisfied.

      +

      Redis based Cluster Ratelimits

      +

      This solution is independent of the dataclient being used. +You have to run one or more Redis instances. +See also Running with Redis based Cluster Ratelimits.

      +

      There are 3 different configurations to assign Redis instances as a Skipper Redis swarm.

      +

      Static

      +

      Specify -swarm-redis-urls, multiple instances can be separated by comma, +for example: -swarm-redis-urls=redis1:6379,redis2:6379. +Use this if you don’t need to scale your Redis instances.

      +

      Kubernetes Service Selector

      +

      Specify -kubernetes-redis-service-namespace=<namespace>, -kubernetes-redis-service-name=<name> +and optional -kubernetes-redis-service-port=<port number>.

      +

      Skipper will update Redis addresses every 10 seconds from specified service endpoints. +This allows you to dynamically scale Redis instances. +Note that when -kubernetes is set Skipper also fetches Ingresses and RouteGroups for routing, +see ingress-controller deployment docs.

      +

      HTTP Endpoint

      +

      Specify -swarm-redis-remote=http://127.0.0.1/redis/endpoints,

      +

      Skipper will update Redis addresses every 10 seconds from this remote URL +that should return data in the following JSON format: +

      {
      +    "endpoints": [
      +        {"address": "10.2.0.1:6379"}, {"address": "10.2.0.2:6379"},
      +        {"address": "10.2.0.3:6379"}, {"address": "10.2.0.4:6379"},
      +        {"address": "10.2.0.5:6379"}
      +    ]
      +}
      +

      +

      If you have routesrv proxy enabled, +you need to configure Skipper with the flag -swarm-redis-remote=http://<routesrv-service-name>.<routesrv-namespace>.svc.cluster.local/swarm/redis/shards. +Routesrv will be responsible for collecting Redis endpoints and Skipper will poll them from it.

      +

      Implementation

      +

      The implementation uses Redis ring +to be able to shard via client hashing and spread the load across +multiple Redis instances to be able to scale out the shared storage.

      +

      The ratelimit algorithm is a sliding window and makes use of the +following Redis commands:

      + +

      Picture showing Skipper with Redis based swarm and ratelimit

      +

      SWIM based Cluster Ratelimits

      +

      SWIM +is a “Scalable Weakly-consistent Infection-style Process Group +Membership Protocol”, which is very interesting to use for cluster +ratelimits. The implementation has some weaknesses in the algorithm, +that sometimes lead to too many ratelimits or too few and therefore is +not considered to be stable. For running skipper in Kubernetes with +this, see also Running with SWIM based Cluster Ratelimits

      +

      In case of Kubernetes you might specify additionally +-swarm-label-selector-key, which defaults to “application” and +-swarm-label-selector-value, which defaults to “skipper-ingress” and +-swarm-namespace, which defaults to “kube-system”.

      +

      The following shows the setup of a SWIM based cluster ratelimit:

      +

      Picture showing Skipper SWIM based swarm and ratelimit

      +

      Backend Ratelimit

      +

      The backend ratelimit filter is clusterRatelimit(). You can define +how many requests a route allows for a given time.Duration in total +for all skipper instances summed up. The first parameter is the group +parameter, which can be used to select the same ratelimit group across +one or more routes

      +

      For example rate limit “groupA” limits the rate limit group to 10 +requests per minute in total for the cluster, you can specify:

      +
      clusterRatelimit("groupA", 10, "1m")
      +
      +

      Client Ratelimit

      +

      The client ratelimit filter is clusterClientRatelimit() and it uses +information from the request to find the bucket which will get the +increased request count. You can define how many requests a client is +allowed to hit this route for a given time.Duration in total for all +skipper instances summed up. The first parameter is the group +parameter, which can be used to select the same ratelimit group across +one or more routes

      +

      For example rate limit “groupB” limits the rate limit group to 10 +requests per minute for the full skipper swarm for the same client +selected by the X-Forwarded-For header, you can specify:

      +
      clusterClientRatelimit("groupB", 10, "1m")
      +
      +

      The same for Authorization Header you would use:

      +
      clusterClientRatelimit("groupC", 10, "1m", "Authorization")
      +
      +

      The optional fourth argument can create an AND combined Header +ratelimit. The header names must be separated by ,. For example all +of the specified headers have to be the same to recognize them as the +same client:

      +
      clusterClientRatelimit("groupC", 5, "10s", "X-Forwarded-For,Authorization,X-Foo")
      +
      +

      Internally skipper has a clean interval to clean up old buckets to reduce +the memory footprint in the long run.

      +

      Security Consideration

      +

      ClusterClientRatelimit works on data provided by the client. In theory an +attacker likely can work around all of your configurations. On the +other hand there is always a pattern in attacks, and you are more +likely being able to find the pattern and mitigate the attack, if you +have a powerful tool like the provided clusterClientRatelimit.

      + + + + + + + + + + + + + +
      +
      + + + +
      + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorials/shadow-traffic/index.html b/tutorials/shadow-traffic/index.html new file mode 100644 index 0000000000..8ce26687fd --- /dev/null +++ b/tutorials/shadow-traffic/index.html @@ -0,0 +1,1398 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Shadow Traffic - Skipper + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + +
      + +
      + + + + + + +
      +
      + + + +
      +
      +
      + + + + + +
      +
      +
      + + + + + + + +
      +
      + + + + + + + +

      Shadow Traffic

      +

      This tutorial will show how to setup routing for shadow traffic, where one backend (main) will receive the full +traffic, while a shadowing backend (test) will receive only a certain percentage of the same traffic.

      +

      Used Predicates:

      + +

      Used Filters:

      + +

      Shadow Traffic Setup

      +

      1. Initial state

      +

      Before the shadow traffic, we are sending all traffic to the main backend.

      +
      main: * -> "https://main.example.org";
      +
      +

      2. Clone the main route, handling 10% of the traffic

      +

      Before generating the shadow traffic, we create an identical clone of the main route that will handle only 10% +of the traffic, while the rest stays being handled by the main route.

      +
      main: * -> "https://main.example.org";
      +split: Traffic(.1) -> "https://main.example.org";
      +
      +

      3. Prepare the route for the shadow traffic

      +

      The route introduced next won’t handle directly any incoming requests, because they won’t be matched by the +Tee predicate, but it is prepared to send tee requests to the alternative, +‘shadow’ backend.

      +
      main: * -> "https://main.example.org";
      +split: Traffic(.1) -> "https://main.example.org";
      +shadow: Tee("shadow-test-1") && True() -> "https://shadow.example.org";
      +
      +

      4. Apply the teeLoopback filter

      +

      Now we can apply the teeLoopback filter to the ‘split’ route, using the +same label as we did in the Tee predicate.

      +
      main: * -> "https://main.example.org";
      +split: Traffic(.1) -> teeLoopback("shadow-test-1") -> "https://main.example.org";
      +shadow: Tee("shadow-test-1") && True() -> "https://shadow.example.org";
      +
      +

      Note that as of now, we need to increase the weight of the ‘shadow’ route by adding the True() predicate in order to avoid that the ‘split’ +route would match the cloned request again.

      +

      After this, the ‘split’ route will still send all the handled requests, 10% of the total traffic, to the main +backend, while the rest of the traffic is routed there by the ‘main’ route. However, the +teeLoopback filter will also clone the traffic of the ‘split’ route, 10% of +the total, and reapply the routing on it, during which these requests will be matched by the +Tee predicate, and sent to the shadow backend.

      + + + + + + + + + + + + + +
      +
      + + + +
      + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file