diff --git a/news/index.html b/news/index.html
index 35541756..b4ea546d 100644
--- a/news/index.html
+++ b/news/index.html
@@ -68,7 +68,7 @@

 system.time(SLmetrics::entropy(pk))
#>    user  system elapsed 
-#>   0.001   0.001   0.001
+#>   0.280   0.001   0.012
 # 3) Disable OpenMP
 SLmetrics::setUseOpenMP(FALSE)
@@ -76,7 +76,7 @@

 system.time(SLmetrics::entropy(pk))
#>    user  system elapsed 
-#>   0.002   0.000   0.002
+#>   0.001   0.000   0.001

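The hunks above come from the NEWS entry benchmarking entropy() with OpenMP enabled and then disabled; the 0.280 user versus 0.012 elapsed figure is consistent with CPU time being summed across threads. A minimal sketch of how such a timing comparison might be reproduced is given below. The construction and size of the probability matrix pk are assumptions (the diff only shows the timing calls), and setUseOpenMP() is assumed to accept TRUE, mirroring the FALSE call shown in the hunk.

# assumed setup: pk is not shown in the diff, so a row-normalised
# probability matrix is generated here purely for illustration
set.seed(1903)
pk <- matrix(runif(1e6 * 5), ncol = 5)
pk <- pk / rowSums(pk)

# 1) time entropy() with OpenMP enabled
# (setUseOpenMP(TRUE) is an assumption based on the FALSE call in the NEWS entry)
SLmetrics::setUseOpenMP(TRUE)
system.time(SLmetrics::entropy(pk))

# 2) disable OpenMP and time the same call again
SLmetrics::setUseOpenMP(FALSE)
system.time(SLmetrics::entropy(pk))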
diff --git a/pkgdown.yml b/pkgdown.yml index ec70c84c..9ea3c12d 100644 --- a/pkgdown.yml +++ b/pkgdown.yml @@ -5,7 +5,7 @@ articles: classification_problems: classification_problems.html regression_problems: regression_problems.html SLmetrics: SLmetrics.html -last_built: 2025-01-04T07:16Z +last_built: 2025-01-04T16:38Z urls: reference: https://serkor1.github.io/SLmetrics/reference article: https://serkor1.github.io/SLmetrics/articles diff --git a/reference/figures/README-performance-1.png b/reference/figures/README-performance-1.png index 1211ff25..3ab73618 100644 Binary files a/reference/figures/README-performance-1.png and b/reference/figures/README-performance-1.png differ diff --git a/search.json b/search.json index a8c33b5f..d276da80 100644 --- a/search.json +++ b/search.json @@ -1 +1 @@ -[{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"our-pledge","dir":"","previous_headings":"","what":"Our Pledge","title":"Contributor Covenant Code of Conduct","text":"members, contributors, leaders pledge make participation community harassment-free experience everyone, regardless age, body size, visible invisible disability, ethnicity, sex characteristics, gender identity expression, level experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, sexual identity orientation. pledge act interact ways contribute open, welcoming, diverse, inclusive, healthy community.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"our-standards","dir":"","previous_headings":"","what":"Our Standards","title":"Contributor Covenant Code of Conduct","text":"Examples behavior contributes positive environment community include: Demonstrating empathy kindness toward people respectful differing opinions, viewpoints, experiences Giving gracefully accepting constructive feedback Accepting responsibility apologizing affected mistakes, learning experience Focusing best just us individuals, overall community Examples unacceptable behavior include: use sexualized language imagery, sexual attention advances kind Trolling, insulting derogatory comments, personal political attacks Public private harassment Publishing others’ private information, physical email address, without explicit permission conduct reasonably considered inappropriate professional setting","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"enforcement-responsibilities","dir":"","previous_headings":"","what":"Enforcement Responsibilities","title":"Contributor Covenant Code of Conduct","text":"Community leaders responsible clarifying enforcing standards acceptable behavior take appropriate fair corrective action response behavior deem inappropriate, threatening, offensive, harmful. Community leaders right responsibility remove, edit, reject comments, commits, code, wiki edits, issues, contributions aligned Code Conduct, communicate reasons moderation decisions appropriate.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"scope","dir":"","previous_headings":"","what":"Scope","title":"Contributor Covenant Code of Conduct","text":"Code Conduct applies within community spaces, also applies individual officially representing community public spaces. 
Examples representing community include using official e-mail address, posting via official social media account, acting appointed representative online offline event.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"enforcement","dir":"","previous_headings":"","what":"Enforcement","title":"Contributor Covenant Code of Conduct","text":"Instances abusive, harassing, otherwise unacceptable behavior may reported community leaders responsible enforcement serkor1@duck.com. complaints reviewed investigated promptly fairly. community leaders obligated respect privacy security reporter incident.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"enforcement-guidelines","dir":"","previous_headings":"","what":"Enforcement Guidelines","title":"Contributor Covenant Code of Conduct","text":"Community leaders follow Community Impact Guidelines determining consequences action deem violation Code Conduct:","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"id_1-correction","dir":"","previous_headings":"Enforcement Guidelines","what":"1. Correction","title":"Contributor Covenant Code of Conduct","text":"Community Impact: Use inappropriate language behavior deemed unprofessional unwelcome community. Consequence: private, written warning community leaders, providing clarity around nature violation explanation behavior inappropriate. public apology may requested.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"id_2-warning","dir":"","previous_headings":"Enforcement Guidelines","what":"2. Warning","title":"Contributor Covenant Code of Conduct","text":"Community Impact: violation single incident series actions. Consequence: warning consequences continued behavior. interaction people involved, including unsolicited interaction enforcing Code Conduct, specified period time. includes avoiding interactions community spaces well external channels like social media. Violating terms may lead temporary permanent ban.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"id_3-temporary-ban","dir":"","previous_headings":"Enforcement Guidelines","what":"3. Temporary Ban","title":"Contributor Covenant Code of Conduct","text":"Community Impact: serious violation community standards, including sustained inappropriate behavior. Consequence: temporary ban sort interaction public communication community specified period time. public private interaction people involved, including unsolicited interaction enforcing Code Conduct, allowed period. Violating terms may lead permanent ban.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"id_4-permanent-ban","dir":"","previous_headings":"Enforcement Guidelines","what":"4. Permanent Ban","title":"Contributor Covenant Code of Conduct","text":"Community Impact: Demonstrating pattern violation community standards, including sustained inappropriate behavior, harassment individual, aggression toward disparagement classes individuals. Consequence: permanent ban sort public interaction within community.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"attribution","dir":"","previous_headings":"","what":"Attribution","title":"Contributor Covenant Code of Conduct","text":"Code Conduct adapted Contributor Covenant, version 2.1, available https://www.contributor-covenant.org/version/2/1/code_of_conduct.html. 
Community Impact Guidelines inspired [Mozilla’s code conduct enforcement ladder][https://github.com/mozilla/inclusion]. answers common questions code conduct, see FAQ https://www.contributor-covenant.org/faq. Translations available https://www.contributor-covenant.org/translations.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":null,"dir":"","previous_headings":"","what":"GNU General Public License","title":"GNU General Public License","text":"Version 3, 29 June 2007Copyright © 2007 Free Software Foundation, Inc.  Everyone permitted copy distribute verbatim copies license document, changing allowed.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"preamble","dir":"","previous_headings":"","what":"Preamble","title":"GNU General Public License","text":"GNU General Public License free, copyleft license software kinds works. licenses software practical works designed take away freedom share change works. contrast, GNU General Public License intended guarantee freedom share change versions program–make sure remains free software users. , Free Software Foundation, use GNU General Public License software; applies also work released way authors. can apply programs, . speak free software, referring freedom, price. General Public Licenses designed make sure freedom distribute copies free software (charge wish), receive source code can get want , can change software use pieces new free programs, know can things. protect rights, need prevent others denying rights asking surrender rights. Therefore, certain responsibilities distribute copies software, modify : responsibilities respect freedom others. example, distribute copies program, whether gratis fee, must pass recipients freedoms received. must make sure , , receive can get source code. must show terms know rights. Developers use GNU GPL protect rights two steps: (1) assert copyright software, (2) offer License giving legal permission copy, distribute /modify . developers’ authors’ protection, GPL clearly explains warranty free software. users’ authors’ sake, GPL requires modified versions marked changed, problems attributed erroneously authors previous versions. devices designed deny users access install run modified versions software inside , although manufacturer can . fundamentally incompatible aim protecting users’ freedom change software. systematic pattern abuse occurs area products individuals use, precisely unacceptable. Therefore, designed version GPL prohibit practice products. problems arise substantially domains, stand ready extend provision domains future versions GPL, needed protect freedom users. Finally, every program threatened constantly software patents. States allow patents restrict development use software general-purpose computers, , wish avoid special danger patents applied free program make effectively proprietary. prevent , GPL assures patents used render program non-free. precise terms conditions copying, distribution modification follow.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_0-definitions","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"0. Definitions","title":"GNU General Public License","text":"“License” refers version 3 GNU General Public License. “Copyright” also means copyright-like laws apply kinds works, semiconductor masks. “Program” refers copyrightable work licensed License. licensee addressed “”. “Licensees” “recipients” may individuals organizations. 
“modify” work means copy adapt part work fashion requiring copyright permission, making exact copy. resulting work called “modified version” earlier work work “based ” earlier work. “covered work” means either unmodified Program work based Program. “propagate” work means anything , without permission, make directly secondarily liable infringement applicable copyright law, except executing computer modifying private copy. Propagation includes copying, distribution (without modification), making available public, countries activities well. “convey” work means kind propagation enables parties make receive copies. Mere interaction user computer network, transfer copy, conveying. interactive user interface displays “Appropriate Legal Notices” extent includes convenient prominently visible feature (1) displays appropriate copyright notice, (2) tells user warranty work (except extent warranties provided), licensees may convey work License, view copy License. interface presents list user commands options, menu, prominent item list meets criterion.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_1-source-code","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"1. Source Code","title":"GNU General Public License","text":"“source code” work means preferred form work making modifications . “Object code” means non-source form work. “Standard Interface” means interface either official standard defined recognized standards body, , case interfaces specified particular programming language, one widely used among developers working language. “System Libraries” executable work include anything, work whole, () included normal form packaging Major Component, part Major Component, (b) serves enable use work Major Component, implement Standard Interface implementation available public source code form. “Major Component”, context, means major essential component (kernel, window system, ) specific operating system () executable work runs, compiler used produce work, object code interpreter used run . “Corresponding Source” work object code form means source code needed generate, install, (executable work) run object code modify work, including scripts control activities. However, include work’s System Libraries, general-purpose tools generally available free programs used unmodified performing activities part work. example, Corresponding Source includes interface definition files associated source files work, source code shared libraries dynamically linked subprograms work specifically designed require, intimate data communication control flow subprograms parts work. Corresponding Source need include anything users can regenerate automatically parts Corresponding Source. Corresponding Source work source code form work.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_2-basic-permissions","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"2. Basic Permissions","title":"GNU General Public License","text":"rights granted License granted term copyright Program, irrevocable provided stated conditions met. License explicitly affirms unlimited permission run unmodified Program. output running covered work covered License output, given content, constitutes covered work. License acknowledges rights fair use equivalent, provided copyright law. may make, run propagate covered works convey, without conditions long license otherwise remains force. 
may convey covered works others sole purpose make modifications exclusively , provide facilities running works, provided comply terms License conveying material control copyright. thus making running covered works must exclusively behalf, direction control, terms prohibit making copies copyrighted material outside relationship . Conveying circumstances permitted solely conditions stated . Sublicensing allowed; section 10 makes unnecessary.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_3-protecting-users-legal-rights-from-anti-circumvention-law","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"3. Protecting Users’ Legal Rights From Anti-Circumvention Law","title":"GNU General Public License","text":"covered work shall deemed part effective technological measure applicable law fulfilling obligations article 11 WIPO copyright treaty adopted 20 December 1996, similar laws prohibiting restricting circumvention measures. convey covered work, waive legal power forbid circumvention technological measures extent circumvention effected exercising rights License respect covered work, disclaim intention limit operation modification work means enforcing, work’s users, third parties’ legal rights forbid circumvention technological measures.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_4-conveying-verbatim-copies","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"4. Conveying Verbatim Copies","title":"GNU General Public License","text":"may convey verbatim copies Program’s source code receive , medium, provided conspicuously appropriately publish copy appropriate copyright notice; keep intact notices stating License non-permissive terms added accord section 7 apply code; keep intact notices absence warranty; give recipients copy License along Program. may charge price price copy convey, may offer support warranty protection fee.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_5-conveying-modified-source-versions","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"5. Conveying Modified Source Versions","title":"GNU General Public License","text":"may convey work based Program, modifications produce Program, form source code terms section 4, provided also meet conditions: ) work must carry prominent notices stating modified , giving relevant date. b) work must carry prominent notices stating released License conditions added section 7. requirement modifies requirement section 4 “keep intact notices”. c) must license entire work, whole, License anyone comes possession copy. License therefore apply, along applicable section 7 additional terms, whole work, parts, regardless packaged. License gives permission license work way, invalidate permission separately received . d) work interactive user interfaces, must display Appropriate Legal Notices; however, Program interactive interfaces display Appropriate Legal Notices, work need make . compilation covered work separate independent works, nature extensions covered work, combined form larger program, volume storage distribution medium, called “aggregate” compilation resulting copyright used limit access legal rights compilation’s users beyond individual works permit. Inclusion covered work aggregate cause License apply parts aggregate.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_6-conveying-non-source-forms","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"6. 
Conveying Non-Source Forms","title":"GNU General Public License","text":"may convey covered work object code form terms sections 4 5, provided also convey machine-readable Corresponding Source terms License, one ways: ) Convey object code , embodied , physical product (including physical distribution medium), accompanied Corresponding Source fixed durable physical medium customarily used software interchange. b) Convey object code , embodied , physical product (including physical distribution medium), accompanied written offer, valid least three years valid long offer spare parts customer support product model, give anyone possesses object code either (1) copy Corresponding Source software product covered License, durable physical medium customarily used software interchange, price reasonable cost physically performing conveying source, (2) access copy Corresponding Source network server charge. c) Convey individual copies object code copy written offer provide Corresponding Source. alternative allowed occasionally noncommercially, received object code offer, accord subsection 6b. d) Convey object code offering access designated place (gratis charge), offer equivalent access Corresponding Source way place charge. need require recipients copy Corresponding Source along object code. place copy object code network server, Corresponding Source may different server (operated third party) supports equivalent copying facilities, provided maintain clear directions next object code saying find Corresponding Source. Regardless server hosts Corresponding Source, remain obligated ensure available long needed satisfy requirements. e) Convey object code using peer--peer transmission, provided inform peers object code Corresponding Source work offered general public charge subsection 6d. separable portion object code, whose source code excluded Corresponding Source System Library, need included conveying object code work. “User Product” either (1) “consumer product”, means tangible personal property normally used personal, family, household purposes, (2) anything designed sold incorporation dwelling. determining whether product consumer product, doubtful cases shall resolved favor coverage. particular product received particular user, “normally used” refers typical common use class product, regardless status particular user way particular user actually uses, expects expected use, product. product consumer product regardless whether product substantial commercial, industrial non-consumer uses, unless uses represent significant mode use product. “Installation Information” User Product means methods, procedures, authorization keys, information required install execute modified versions covered work User Product modified version Corresponding Source. information must suffice ensure continued functioning modified object code case prevented interfered solely modification made. convey object code work section , , specifically use , User Product, conveying occurs part transaction right possession use User Product transferred recipient perpetuity fixed term (regardless transaction characterized), Corresponding Source conveyed section must accompanied Installation Information. requirement apply neither third party retains ability install modified object code User Product (example, work installed ROM). requirement provide Installation Information include requirement continue provide support service, warranty, updates work modified installed recipient, User Product modified installed. 
Access network may denied modification materially adversely affects operation network violates rules protocols communication across network. Corresponding Source conveyed, Installation Information provided, accord section must format publicly documented (implementation available public source code form), must require special password key unpacking, reading copying.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_7-additional-terms","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"7. Additional Terms","title":"GNU General Public License","text":"“Additional permissions” terms supplement terms License making exceptions one conditions. Additional permissions applicable entire Program shall treated though included License, extent valid applicable law. additional permissions apply part Program, part may used separately permissions, entire Program remains governed License without regard additional permissions. convey copy covered work, may option remove additional permissions copy, part . (Additional permissions may written require removal certain cases modify work.) may place additional permissions material, added covered work, can give appropriate copyright permission. Notwithstanding provision License, material add covered work, may (authorized copyright holders material) supplement terms License terms: ) Disclaiming warranty limiting liability differently terms sections 15 16 License; b) Requiring preservation specified reasonable legal notices author attributions material Appropriate Legal Notices displayed works containing ; c) Prohibiting misrepresentation origin material, requiring modified versions material marked reasonable ways different original version; d) Limiting use publicity purposes names licensors authors material; e) Declining grant rights trademark law use trade names, trademarks, service marks; f) Requiring indemnification licensors authors material anyone conveys material (modified versions ) contractual assumptions liability recipient, liability contractual assumptions directly impose licensors authors. non-permissive additional terms considered “restrictions” within meaning section 10. Program received , part , contains notice stating governed License along term restriction, may remove term. license document contains restriction permits relicensing conveying License, may add covered work material governed terms license document, provided restriction survive relicensing conveying. add terms covered work accord section, must place, relevant source files, statement additional terms apply files, notice indicating find applicable terms. Additional terms, permissive non-permissive, may stated form separately written license, stated exceptions; requirements apply either way.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_8-termination","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"8. Termination","title":"GNU General Public License","text":"may propagate modify covered work except expressly provided License. attempt otherwise propagate modify void, automatically terminate rights License (including patent licenses granted third paragraph section 11). However, cease violation License, license particular copyright holder reinstated () provisionally, unless copyright holder explicitly finally terminates license, (b) permanently, copyright holder fails notify violation reasonable means prior 60 days cessation. 
Moreover, license particular copyright holder reinstated permanently copyright holder notifies violation reasonable means, first time received notice violation License (work) copyright holder, cure violation prior 30 days receipt notice. Termination rights section terminate licenses parties received copies rights License. rights terminated permanently reinstated, qualify receive new licenses material section 10.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_9-acceptance-not-required-for-having-copies","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"9. Acceptance Not Required for Having Copies","title":"GNU General Public License","text":"required accept License order receive run copy Program. Ancillary propagation covered work occurring solely consequence using peer--peer transmission receive copy likewise require acceptance. However, nothing License grants permission propagate modify covered work. actions infringe copyright accept License. Therefore, modifying propagating covered work, indicate acceptance License .","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_10-automatic-licensing-of-downstream-recipients","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"10. Automatic Licensing of Downstream Recipients","title":"GNU General Public License","text":"time convey covered work, recipient automatically receives license original licensors, run, modify propagate work, subject License. responsible enforcing compliance third parties License. “entity transaction” transaction transferring control organization, substantially assets one, subdividing organization, merging organizations. propagation covered work results entity transaction, party transaction receives copy work also receives whatever licenses work party’s predecessor interest give previous paragraph, plus right possession Corresponding Source work predecessor interest, predecessor can get reasonable efforts. may impose restrictions exercise rights granted affirmed License. example, may impose license fee, royalty, charge exercise rights granted License, may initiate litigation (including cross-claim counterclaim lawsuit) alleging patent claim infringed making, using, selling, offering sale, importing Program portion .","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_11-patents","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"11. Patents","title":"GNU General Public License","text":"“contributor” copyright holder authorizes use License Program work Program based. work thus licensed called contributor’s “contributor version”. contributor’s “essential patent claims” patent claims owned controlled contributor, whether already acquired hereafter acquired, infringed manner, permitted License, making, using, selling contributor version, include claims infringed consequence modification contributor version. purposes definition, “control” includes right grant patent sublicenses manner consistent requirements License. contributor grants non-exclusive, worldwide, royalty-free patent license contributor’s essential patent claims, make, use, sell, offer sale, import otherwise run, modify propagate contents contributor version. following three paragraphs, “patent license” express agreement commitment, however denominated, enforce patent (express permission practice patent covenant sue patent infringement). “grant” patent license party means make agreement commitment enforce patent party. 
convey covered work, knowingly relying patent license, Corresponding Source work available anyone copy, free charge terms License, publicly available network server readily accessible means, must either (1) cause Corresponding Source available, (2) arrange deprive benefit patent license particular work, (3) arrange, manner consistent requirements License, extend patent license downstream recipients. “Knowingly relying” means actual knowledge , patent license, conveying covered work country, recipient’s use covered work country, infringe one identifiable patents country reason believe valid. , pursuant connection single transaction arrangement, convey, propagate procuring conveyance , covered work, grant patent license parties receiving covered work authorizing use, propagate, modify convey specific copy covered work, patent license grant automatically extended recipients covered work works based . patent license “discriminatory” include within scope coverage, prohibits exercise , conditioned non-exercise one rights specifically granted License. may convey covered work party arrangement third party business distributing software, make payment third party based extent activity conveying work, third party grants, parties receive covered work , discriminatory patent license () connection copies covered work conveyed (copies made copies), (b) primarily connection specific products compilations contain covered work, unless entered arrangement, patent license granted, prior 28 March 2007. Nothing License shall construed excluding limiting implied license defenses infringement may otherwise available applicable patent law.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_12-no-surrender-of-others-freedom","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"12. No Surrender of Others’ Freedom","title":"GNU General Public License","text":"conditions imposed (whether court order, agreement otherwise) contradict conditions License, excuse conditions License. convey covered work satisfy simultaneously obligations License pertinent obligations, consequence may convey . example, agree terms obligate collect royalty conveying convey Program, way satisfy terms License refrain entirely conveying Program.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_13-use-with-the-gnu-affero-general-public-license","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"13. Use with the GNU Affero General Public License","title":"GNU General Public License","text":"Notwithstanding provision License, permission link combine covered work work licensed version 3 GNU Affero General Public License single combined work, convey resulting work. terms License continue apply part covered work, special requirements GNU Affero General Public License, section 13, concerning interaction network apply combination .","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_14-revised-versions-of-this-license","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"14. Revised Versions of this License","title":"GNU General Public License","text":"Free Software Foundation may publish revised /new versions GNU General Public License time time. new versions similar spirit present version, may differ detail address new problems concerns. version given distinguishing version number. 
Program specifies certain numbered version GNU General Public License “later version” applies , option following terms conditions either numbered version later version published Free Software Foundation. Program specify version number GNU General Public License, may choose version ever published Free Software Foundation. Program specifies proxy can decide future versions GNU General Public License can used, proxy’s public statement acceptance version permanently authorizes choose version Program. Later license versions may give additional different permissions. However, additional obligations imposed author copyright holder result choosing follow later version.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_15-disclaimer-of-warranty","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"15. Disclaimer of Warranty","title":"GNU General Public License","text":"WARRANTY PROGRAM, EXTENT PERMITTED APPLICABLE LAW. EXCEPT OTHERWISE STATED WRITING COPYRIGHT HOLDERS /PARTIES PROVIDE PROGRAM “” WITHOUT WARRANTY KIND, EITHER EXPRESSED IMPLIED, INCLUDING, LIMITED , IMPLIED WARRANTIES MERCHANTABILITY FITNESS PARTICULAR PURPOSE. ENTIRE RISK QUALITY PERFORMANCE PROGRAM . PROGRAM PROVE DEFECTIVE, ASSUME COST NECESSARY SERVICING, REPAIR CORRECTION.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_16-limitation-of-liability","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"16. Limitation of Liability","title":"GNU General Public License","text":"EVENT UNLESS REQUIRED APPLICABLE LAW AGREED WRITING COPYRIGHT HOLDER, PARTY MODIFIES /CONVEYS PROGRAM PERMITTED , LIABLE DAMAGES, INCLUDING GENERAL, SPECIAL, INCIDENTAL CONSEQUENTIAL DAMAGES ARISING USE INABILITY USE PROGRAM (INCLUDING LIMITED LOSS DATA DATA RENDERED INACCURATE LOSSES SUSTAINED THIRD PARTIES FAILURE PROGRAM OPERATE PROGRAMS), EVEN HOLDER PARTY ADVISED POSSIBILITY DAMAGES.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_17-interpretation-of-sections-15-and-16","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"17. Interpretation of Sections 15 and 16","title":"GNU General Public License","text":"disclaimer warranty limitation liability provided given local legal effect according terms, reviewing courts shall apply local law closely approximates absolute waiver civil liability connection Program, unless warranty assumption liability accompanies copy Program return fee. END TERMS CONDITIONS","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"how-to-apply-these-terms-to-your-new-programs","dir":"","previous_headings":"","what":"How to Apply These Terms to Your New Programs","title":"GNU General Public License","text":"develop new program, want greatest possible use public, best way achieve make free software everyone can redistribute change terms. , attach following notices program. safest attach start source file effectively state exclusion warranty; file least “copyright” line pointer full notice found. Also add information contact electronic paper mail. program terminal interaction, make output short notice like starts interactive mode: hypothetical commands show w show c show appropriate parts General Public License. course, program’s commands might different; GUI interface, use “box”. also get employer (work programmer) school, , sign “copyright disclaimer” program, necessary. information , apply follow GNU GPL, see . GNU General Public License permit incorporating program proprietary programs. 
program subroutine library, may consider useful permit linking proprietary applications library. want , use GNU Lesser General Public License instead License. first, please read .","code":" Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'. This is free software, and you are welcome to redistribute it under certain conditions; type 'show c' for details."},{"path":"https://serkor1.github.io/SLmetrics/articles/SLmetrics.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"{SLmetrics}: Machine learning performance evaluation on steroids","text":"{SLmetrics} low-level R package supervised AI/ML performance evaluation. uses {Rcpp} {RcppEigen} backend memory efficient fast execution various metrics. {SLmetrics} follows syntax base R, uses S3-classes.","code":""},{"path":"https://serkor1.github.io/SLmetrics/articles/SLmetrics.html","id":"why","dir":"Articles","previous_headings":"","what":"Why?","title":"{SLmetrics}: Machine learning performance evaluation on steroids","text":"currently packages bridges gap R Python terms AI/ML performance evaluation; {MLmetrics}, {yardstick}, {mlr3measures}{metrica}. {MLmetrics} can considered legacy code comes performance evaluation, served backend {yardstick} version 0.0.2. built entirely base R, stable since inception almost 10 years ago. However, appears development reached ’s peak currently stale - see, example, stale PR related issue. Micro- macro-averages implented {scikit-learn} many years, {MLmetrics} simply didn’t keep development. {yardstick}, hand, carried torch forward implemented modern features. {yardstick} closely follows syntax, naming functionality {scikit-learn} built {tidyverse} tools; although source code nice look , introduce serious overhead carries risk deprecations. Furthermore, complicates simple application verbose function naming, see example metric()-function metric_vec()-function - output , call different. {yardstick} can’t handle one positive class time, end-user forced run function get performance metrics adjacent classes. {SLmetrics}, name suggests, closely resembles {MLmetrics} simplicity, similarity ends. {SLmetrics} reflects simplicity application; comparing two vectors. 
functionality features closely follows {scikit-learn} {pytorch} - significant edge two, alongside R packages, comes speed, efficiency user-friendliness; uses c++ backend, S3-classes frontend (See speed comparison)","code":""},{"path":"https://serkor1.github.io/SLmetrics/articles/SLmetrics.html","id":"basic-usage-classification","dir":"Articles","previous_headings":"","what":"Basic usage: classification","title":"{SLmetrics}: Machine learning performance evaluation on steroids","text":"","code":"# 1) recode iris # to binary problem iris$Species <- factor( x = as.numeric( iris$Species == \"virginica\" ), levels = c(1,0), labels = c(\"virginica\", \"others\") ) # 2) fit the logistic # regression model <- glm( formula = Species ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- as.factor( ifelse( predict(model, type = \"response\") > 0.5, yes = \"virginica\", no = \"others\" ) ) # 1) construct confusion # matrix confusion_matrix <- cmatrix( actual = iris$Species, predicted = predicted ) # 2) visualize # confusion matrix plot( confusion_matrix ) # 3) summarise # confusion matrix summary( confusion_matrix ) #> Confusion Matrix (2 x 2) #> ================================================================================ #> virginica others #> virginica 35 15 #> others 14 86 #> ================================================================================ #> Overall Statistics (micro average) #> - Accuracy: 0.81 #> - Balanced Accuracy: 0.78 #> - Sensitivity: 0.81 #> - Specificity: 0.81 #> - Precision: 0.81"},{"path":"https://serkor1.github.io/SLmetrics/articles/classification_problems.html","id":"setup","dir":"Articles","previous_headings":"","what":"Setup","title":"{SLmetrics}: Classification","text":"section setup essential workflow using {SLmetrics} {lightgbm}.","code":""},{"path":"https://serkor1.github.io/SLmetrics/articles/classification_problems.html","id":"the-data","dir":"Articles","previous_headings":"Setup","what":"The data","title":"{SLmetrics}: Classification","text":"","code":"# 1) load data # from {mlbench} data(\"Glass\", package = \"mlbench\") # 1.1) define the features # and outcomes outcome <- c(\"Type\") features <- setdiff(x = colnames(Glass), y = outcome) # 2) split data in training # and test # 2.1) set seed for # for reproducibility set.seed(1903) # 2.2) exttract # indices with a simple # 80/10 split index <- sample(1:nrow(Glass), size = 0.8 * nrow(Glass)) # 1.1) extract training # data and construct # as lgb.Dataset train <- Glass[index,] dtrain <- lightgbm::lgb.Dataset( data = data.matrix(train[,features]), label = train$Type ) # 1.2) extract test # data test <- Glass[-index,] # 1.2.1) extract actual # values and constuct # as.factor for {SLmetrics} # methods actual <- as.factor( test$Type ) # 1.2.2) construct as data.matrix # for predict method test <- data.matrix( test[,features] )"},{"path":"https://serkor1.github.io/SLmetrics/articles/classification_problems.html","id":"setting-up-parameters","dir":"Articles","previous_headings":"Setup","what":"Setting up parameters","title":"{SLmetrics}: Classification","text":"","code":"# 1) define parameters # across the vignette parameters <- list( objective = \"multiclass\", num_leaves = 4L, learning_rate = 0.5, num_class = 8 )"},{"path":"https://serkor1.github.io/SLmetrics/articles/classification_problems.html","id":"evaluation-function-f-score","dir":"Articles","previous_headings":"Setup","what":"Evaluation function: F score","title":"{SLmetrics}: 
Classification","text":"custom evaluation function use \\(F_{score}\\) \\(\\beta = 2\\) emphasize precision imporant recall. function defined , \\[ f_\\beta = (1 + \\beta^2) \\cdot \\frac{precision \\cdot recall}{(\\beta^2 \\cdot precision) + recall} \\] fbeta()-function returns vector scores class. want maximize micro-average.","code":"# 1) define the custom # evaluation metric evaluation_metric <- function( dtrain, preds) { # 1) extract values actual <- as.factor(dtrain) predicted <- lightgbm::get_field(preds, \"label\") value <- fbeta( actual = actual, predicted = predicted, beta = 2, micro = TRUE ) # 2) construnct output # list list( name = \"fbeta\", value = value, higher_better = TRUE ) }"},{"path":"https://serkor1.github.io/SLmetrics/articles/classification_problems.html","id":"training-model","dir":"Articles","previous_headings":"","what":"Training model","title":"{SLmetrics}: Classification","text":"train model using lgb.train()-function,","code":"model <- lightgbm::lgb.train( params = parameters, data = dtrain, nrounds = 10L, eval = evaluation_metric, verbose = -1 )"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/articles/classification_problems.html","id":"classification","dir":"Articles","previous_headings":"Performance Evaluation","what":"Classification","title":"{SLmetrics}: Classification","text":"extract predicted classes using predict()-function,","code":"# 1) prediction # from the model predicted <- as.factor( predict( model, newdata = test, type = \"class\" ) ) # 1) construct confusion # matrix confusion_matrix <- cmatrix( actual = actual, predicted = predicted ) # 2) visualize plot( confusion_matrix ) # 3) summarize summary( confusion_matrix ) #> Confusion Matrix (6 x 6) #> ================================================================================ #> 1 2 3 5 6 7 #> 1 13 2 1 0 0 0 #> 2 1 13 1 0 0 0 #> 3 0 0 2 0 0 0 #> 5 0 0 0 0 0 0 #> 6 0 0 0 1 2 0 #> 7 0 0 0 0 0 7 #> ================================================================================ #> Overall Statistics (micro average) #> - Accuracy: 0.86 #> - Balanced Accuracy: 0.87 #> - Sensitivity: 0.86 #> - Specificity: 0.97 #> - Precision: 0.86"},{"path":"https://serkor1.github.io/SLmetrics/articles/classification_problems.html","id":"response","dir":"Articles","previous_headings":"Performance Evaluation","what":"Response","title":"{SLmetrics}: Classification","text":"extract response values using predict()-function, response can passed ROC()-function, ROC()-function returns data.frame-object, 264 rows corresponding length response multiplied number classes data. 
roc-object can plotted follows, ROC()-function accepts custom threshold-argument, can passed follows, new object 264 rows.","code":"# 1) prediction # from the model response <- predict( model, newdata = test ) # 1) calculate the reciever # operator characteristics roc <- ROC( actual = actual, response = response ) # 2) print the roc # object print(roc) #> threshold level label fpr tpr #> 1 Inf 1 1 0.0000 0.0000 #> 2 3.60e-15 1 1 0.0370 0.0000 #> 3 2.70e-15 1 1 0.0370 0.0625 #> 4 1.46e-15 1 1 0.0741 0.0625 #> 5 1.16e-15 1 1 0.1111 0.0625 #> 6 1.15e-15 1 1 0.1481 0.0625 #> 7 1.12e-15 1 1 0.1852 0.0625 #> 8 1.03e-15 1 1 0.2222 0.0625 #> 9 9.87e-16 1 1 0.2222 0.1250 #> 10 9.45e-16 1 1 0.2593 0.1250 #> [ reached 'max' / getOption(\"max.print\") -- omitted 254 rows ] # 1) plot roc # object plot(roc) # 1) create custom # thresholds thresholds <- seq( from = 0.9, to = 0.1, length.out = 10 ) # 2) pass the custom thresholds # to the ROC()-function roc <- ROC( actual = actual, response = response, thresholds = thresholds ) # 3) print the roc # object print(roc) #> threshold level label fpr tpr #> 1 Inf 1 1 0.0000 0.0000 #> 2 3.60e-15 1 1 0.0370 0.0000 #> 3 2.70e-15 1 1 0.0370 0.0625 #> 4 1.46e-15 1 1 0.0741 0.0625 #> 5 1.16e-15 1 1 0.1111 0.0625 #> 6 1.15e-15 1 1 0.1481 0.0625 #> 7 1.12e-15 1 1 0.1852 0.0625 #> 8 1.03e-15 1 1 0.2222 0.0625 #> 9 9.87e-16 1 1 0.2222 0.1250 #> 10 9.45e-16 1 1 0.2593 0.1250 #> [ reached 'max' / getOption(\"max.print\") -- omitted 254 rows ] # 1) viasualize # ROC plot(roc) # 1) summarise ROC summary(roc) #> Reciever Operator Characteristics #> ================================================================================ #> AUC #> - 1: 0.414 #> - 2: 0.761 #> - 3: 0.524 #> - 5: 0 #> - 6: 0.846 #> - 7: 0.024"},{"path":"https://serkor1.github.io/SLmetrics/articles/regression_problems.html","id":"setup","dir":"Articles","previous_headings":"","what":"Setup","title":"{SLmetrics}: Regression","text":"section setup essential workflow using {SLmetrics} {xgboost}.","code":""},{"path":"https://serkor1.github.io/SLmetrics/articles/regression_problems.html","id":"the-data","dir":"Articles","previous_headings":"Setup","what":"The data","title":"{SLmetrics}: Regression","text":"","code":"# 1) load data # from {mlbench} data(\"BostonHousing\", package = \"mlbench\") # 1.1) define the features # and outcomes outcome <- c(\"medv\") features <- setdiff( x = colnames(BostonHousing), y = outcome ) # 2) split data in training # and test # 2.1) set seed for # for reproducibility set.seed(1903) # 2.2) exttract # indices with a simple # 90/10 split index <- sample(1:nrow(BostonHousing), size = 0.9 * nrow(BostonHousing)) # 1.1) extract training # data and construct # as lgb.Dataset train <- BostonHousing[index,] # 1.1.1) convert # to DMatrix dtrain <- xgboost::xgb.DMatrix( data = data.matrix(train[, features]), label = data.matrix(train[, outcome]) ) # 1.2) extract test # data test <- BostonHousing[-index,] # 1.2.1) convert to DMatrix dtest <- xgboost::xgb.DMatrix( data = data.matrix(test[, features]), label = data.matrix(test[, outcome]) ) # 1.2.2) extract actual # outcome actual <- test$medv"},{"path":"https://serkor1.github.io/SLmetrics/articles/regression_problems.html","id":"set-parameters","dir":"Articles","previous_headings":"Setup","what":"Set parameters","title":"{SLmetrics}: Regression","text":"","code":"# 1) define parameters # across the vignette parameters <- list( max_depth = 2, eta = 1 
)"},{"path":"https://serkor1.github.io/SLmetrics/articles/regression_problems.html","id":"evaluation-function-relative-root-mean-squared-error-rrmse","dir":"Articles","previous_headings":"Setup","what":"Evaluation function: Relative Root Mean Squared Error (RRMSE)","title":"{SLmetrics}: Regression","text":"function defined , \\[ \\text{RRMSE} = \\sqrt{\\frac{\\sum_{=1}^n (y_i - \\upsilon_i)^2}{\\sum_{=1}^n (y_i - \\bar{y})^2}} \\] \\(y_i\\) actual values, \\(\\upsilon_i\\) predicted values \\(\\bar{y}\\) mean \\(y\\).","code":"# 1) define the custom # evaluation metric evaluation_metric <- function( preds, dtrain) { # 1) extract values actual <- xgboost::getinfo(dtrain, \"label\") predicted <- preds value <- rrse( actual = actual, predicted = predicted ) # 2) construnct output # list list( metric = \"RRMSE\", value = value ) }"},{"path":"https://serkor1.github.io/SLmetrics/articles/regression_problems.html","id":"training-model","dir":"Articles","previous_headings":"","what":"Training model","title":"{SLmetrics}: Regression","text":"train model using xgb.train()-function,","code":"# 1) model training model <- xgboost::xgb.train( params = parameters, data = dtrain, nrounds = 10L, verbose = 0, feval = evaluation_metric, watchlist = list( train = dtrain, test = dtest ), maximize = FALSE )"},{"path":"https://serkor1.github.io/SLmetrics/articles/regression_problems.html","id":"performance-evaluation","dir":"Articles","previous_headings":"","what":"Performance Evaluation","title":"{SLmetrics}: Regression","text":"extract predicted values using predict()-function, summarize performance using relative root mean squared error, root mean squared error concordance correlation coefficient","code":"# 1) out of sample # prediction predicted <- predict( model, newdata = dtest ) # 1) summarize all # performance measures # in data.frame data.frame( RRMSE = rrse(actual, predicted), RMSE = rmse(actual, predicted), CCC = ccc(actual, predicted) ) #> RRMSE RMSE CCC #> 1 0.4578544 3.705342 0.8840932"},{"path":"https://serkor1.github.io/SLmetrics/authors.html","id":null,"dir":"","previous_headings":"","what":"Authors","title":"Authors and Citation","text":"Serkan Korkmaz. Maintainer, author, copyright holder.","code":""},{"path":"https://serkor1.github.io/SLmetrics/authors.html","id":"citation","dir":"","previous_headings":"","what":"Citation","title":"Authors and Citation","text":"Korkmaz S (2025). SLmetrics: Machine Learning Performance Evaluation Steroids. R package version 0.3-1, https://serkor1.github.io/SLmetrics/.","code":"@Manual{, title = {SLmetrics: Machine Learning Performance Evaluation on Steroids}, author = {Serkan Korkmaz}, year = {2025}, note = {R package version 0.3-1}, url = {https://serkor1.github.io/SLmetrics/}, }"},{"path":"https://serkor1.github.io/SLmetrics/index.html","id":"slmetrics-aiml-performance-evaluation-","dir":"","previous_headings":"","what":"Performance Evaluation","title":"Performance Evaluation","text":"{SLmetrics} low-level R package supervised AI/ML performance evaluation. uses {Rcpp} {RcppEigen} backend memory efficient fast execution various metrics. 
{SLmetrics} follows syntax base R, uses S3-classes.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/index.html","id":"shield-stable-version","dir":"","previous_headings":":information_source: Installation","what":"🛡️ Stable version","title":"Performance Evaluation","text":"","code":"## install stable release devtools::install_github( repo = 'https://github.com/serkor1/SLmetrics@*release', ref = 'main' )"},{"path":"https://serkor1.github.io/SLmetrics/index.html","id":"hammer_and_wrench-development-version","dir":"","previous_headings":":information_source: Installation","what":":hammer_and_wrench: Development version","title":"Performance Evaluation","text":"","code":"## install development version devtools::install_github( repo = 'https://github.com/serkor1/SLmetrics', ref = 'development' )"},{"path":"https://serkor1.github.io/SLmetrics/index.html","id":"information_source-code-of-conduct","dir":"","previous_headings":"","what":":information_source: Code of Conduct","title":"Performance Evaluation","text":"Please note {SLmetrics} project released Contributor Code Conduct. contributing project, agree abide terms.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ROC.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","text":"ROC()-function computes tpr() fpr() thresholds provided \\(response\\)- \\(thresholds\\)-vector. function constructs data.frame() grouped \\(k\\)-classes class treated binary classification problem.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ROC.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","text":"","code":"# S3 method for class 'factor' ROC(actual, response, thresholds = NULL, ...) # S3 method for class 'factor' weighted.ROC(actual, response, w, thresholds = NULL, ...) ROC(...) weighted.ROC(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/ROC.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. response -vector length \\(n\\). estimated response probabilities. thresholds optional -vector non-zero length (default: NULL). ... Arguments passed methods. w -vector length \\(n\\). NULL default.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ROC.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","text":"data.frame following form, threshold Thresholds used determine tpr() fpr() level level actual label levels actual fpr false positive rate tpr true positve rate","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ROC.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. 
Now, suppose model predict B's. predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":"https://serkor1.github.io/SLmetrics/reference/ROC.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#TN_k}{\\#TN_k+\\#FP_k} $$ \\(\\#TN_k\\) \\(\\#FP_k\\) number true negatives false positives, respectively, class \\(k\\).","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/ROC.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes response <-predict(model, type = \"response\") # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) generate reciever # operator characteristics roc <- ROC( actual = actual, response = response ) # 5) plot by species plot(roc) # 5.1) summarise summary(roc) #> Reciever Operator Characteristics #> ================================================================================ #> AUC #> - Others: 0.114 #> - Virginica: 0.887 # 6) provide custom # threholds roc <- ROC( actual = actual, response = response, thresholds = seq(0, 1, length.out = 4) ) # 5) plot by species plot(roc)"},{"path":"https://serkor1.github.io/SLmetrics/reference/SLmetrics-package.html","id":null,"dir":"Reference","previous_headings":"","what":"SLmetrics: Machine Learning Performance Evaluation on Steroids — SLmetrics-package","title":"SLmetrics: Machine Learning Performance Evaluation on Steroids — SLmetrics-package","text":"{SLmetrics} lightweight package written C++ supervised unsupervised Machine Learning applications. package developed two primary goals mind: memory management execution speed. functions designed internal pointers references, ensuring passed objects copied memory, resulting optimized performance.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/SLmetrics-package.html","id":"handling-of-missing-values","dir":"Reference","previous_headings":"","what":"Handling of Missing Values","title":"SLmetrics: Machine Learning Performance Evaluation on Steroids — SLmetrics-package","text":"{SLmetrics} provide explicit handling missing values either regression classification applications. Users advised ensure input data preprocessed remove impute missing values passing functions. Since package heavily relies pointers references performance, passing data missing values may lead undefined behavior, including potential crashes R session. 
classification metrics support micro macro averages, {SLmetrics} handle invalid values divisions zero, ensuring robust computation accurate results.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/SLmetrics-package.html","id":"author","dir":"Reference","previous_headings":"","what":"Author","title":"SLmetrics: Machine Learning Performance Evaluation on Steroids — SLmetrics-package","text":"Maintainer: Serkan Korkmaz serkor1@duck.com (ORCID) [copyright holder]","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/accuracy.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","title":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","text":"accuracy() function computes accuracy two vectors predicted observed factor() values. weighted.accuracy() function computes weighted accuracy.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/accuracy.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","text":"","code":"# S3 method for class 'factor' accuracy(actual, predicted, ...) # S3 method for class 'factor' weighted.accuracy(actual, predicted, w, ...) # S3 method for class 'cmatrix' accuracy(x, ...) accuracy(...) weighted.accuracy(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/accuracy.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","text":"actual vector - length \\(n\\), \\(k\\) levels predicted vector -vector length \\(n\\), \\(k\\) levels ... Arguments passed methods w -vector length \\(n\\). NULL default x confusion matrix created cmatrix()","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/accuracy.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","text":"-vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/accuracy.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","text":"metric calculated follows, $$ \\frac{\\#TP + \\#TN}{\\#TP + \\#TN + \\#FP + \\#FN} $$ \\(\\#TP\\), \\(\\#TN\\), \\(\\#FP\\), \\(\\#FN\\) number true positives, true negatives, false positives, false negatives, respectively.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/accuracy.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
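As a plain base-R illustration of the accuracy formula above (not the package's C++ routine), the same quantity can be read off a contingency table; the toy factors mirror the worked example in the entry.
## sketch: accuracy = correct classifications / all classifications
set.seed(1903)
actual <- factor(sample(1:3, 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
set.seed(1903)
predicted <- factor(sample(c(1, 3), 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
cm <- table(actual, predicted)   # rows = actual, columns = predicted
sum(diag(cm)) / sum(cm)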
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/accuracy.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate model # performance cat( \"Accuracy\", accuracy( actual = actual, predicted = predicted ), \"Accuracy (weigthed)\", weighted.accuracy( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ), sep = \"\\n\" ) #> Accuracy #> 0.8066667 #> Accuracy (weigthed) #> 0.7488026"},{"path":"https://serkor1.github.io/SLmetrics/reference/baccuracy.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","title":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","text":"baccuracy()-function computes balanced accuracy two vectors predicted observed factor() values. weighted.baccuracy() function computes weighted balanced accuracy.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/baccuracy.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","text":"","code":"# S3 method for class 'factor' baccuracy(actual, predicted, adjust = FALSE, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.baccuracy(actual, predicted, w, adjust = FALSE, na.rm = TRUE, ...) # S3 method for class 'cmatrix' baccuracy(x, adjust = FALSE, na.rm = TRUE, ...) baccuracy(...) weighted.baccuracy(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/baccuracy.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","text":"actual vector - length \\(n\\), \\(k\\) levels predicted vector -vector length \\(n\\), \\(k\\) levels adjust logical value (default: FALSE). TRUE metric adjusted random chance \\(\\frac{1}{k}\\). na.rm logical values (default: TRUE). TRUE calculation metric based valid classes. ... Arguments passed methods w -vector length \\(n\\). 
NULL default x confusion matrix created cmatrix()","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/baccuracy.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","text":"numeric-vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/baccuracy.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","text":"metric calculated follows, $$ \\frac{\\text{sensitivity} + \\text{specificty}}{2} $$ See sensitivity()- /specificity()-function details.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/baccuracy.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/baccuracy.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate the # model cat( \"Balanced accuracy\", baccuracy( actual = actual, predicted = predicted ), \"Balanced accuracy (weigthed)\", weighted.baccuracy( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ), sep = \"\\n\" ) #> Balanced accuracy #> 0.78 #> Balanced accuracy (weigthed) #> 0.748419"},{"path":"https://serkor1.github.io/SLmetrics/reference/ccc.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{concordance}\\) \\(\\text{correlation}\\) \\(\\text{coefficient}\\) — ccc.numeric","title":"Compute the \\(\\text{concordance}\\) \\(\\text{correlation}\\) \\(\\text{coefficient}\\) — ccc.numeric","text":"ccc()-function computes simple weighted concordance correlation coefficient two vectors predicted observed values. weighted.ccc() function computes weighted Concordance Correlation Coefficient. 
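Looking back at the balanced-accuracy formula quoted above, a minimal hand-rolled binary example (vectors invented for illustration) makes the averaging of sensitivity and specificity explicit.
## sketch: balanced accuracy = (sensitivity + specificity) / 2
actual    <- factor(c("pos", "pos", "pos", "neg", "neg", "neg"), levels = c("pos", "neg"))
predicted <- factor(c("pos", "neg", "pos", "neg", "neg", "pos"), levels = c("pos", "neg"))
tp <- sum(predicted == "pos" & actual == "pos"); fn <- sum(predicted == "neg" & actual == "pos")
tn <- sum(predicted == "neg" & actual == "neg"); fp <- sum(predicted == "pos" & actual == "neg")
(tp / (tp + fn) + tn / (tn + fp)) / 2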
correction TRUE \\(\\sigma^2\\) adjusted \\(\\frac{1-n}{n}\\) intermediate steps.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ccc.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{concordance}\\) \\(\\text{correlation}\\) \\(\\text{coefficient}\\) — ccc.numeric","text":"","code":"# S3 method for class 'numeric' ccc(actual, predicted, correction = FALSE, ...) # S3 method for class 'numeric' weighted.ccc(actual, predicted, w, correction = FALSE, ...) ccc(...) weighted.ccc(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/ccc.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{concordance}\\) \\(\\text{correlation}\\) \\(\\text{coefficient}\\) — ccc.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. correction vector length \\(1\\) (default: FALSE). TRUE variance covariance adjusted \\(\\frac{1-n}{n}\\) ... Arguments passed methods. w -vector length \\(n\\). weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ccc.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{concordance}\\) \\(\\text{correlation}\\) \\(\\text{coefficient}\\) — ccc.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ccc.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{concordance}\\) \\(\\text{correlation}\\) \\(\\text{coefficient}\\) — ccc.numeric","text":"metric calculated follows, $$ \\rho_c = \\frac{2 \\rho \\sigma_x \\sigma_y}{\\sigma_x^2 + \\sigma_y^2 + (\\mu_x - \\mu_y)^2} $$ \\(\\rho\\) \\(\\text{pearson correlation coefficient}\\), \\(\\sigma\\) \\(\\text{standard deviation}\\) \\(\\mu\\) simple mean actual predicted.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/ccc.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{concordance}\\) \\(\\text{correlation}\\) \\(\\text{coefficient}\\) — ccc.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance cat( \"Concordance Correlation Coefficient\", ccc( actual = actual, predicted = predicted, correction = FALSE ), \"Concordance Correlation Coefficient (corrected)\", ccc( actual = actual, predicted = predicted, correction = TRUE ), \"Concordance Correlation Coefficient (weigthed)\", weighted.ccc( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg), correction = FALSE ), sep = \"\\n\" ) #> Concordance Correlation Coefficient #> 0.9299181 #> Concordance Correlation Coefficient (corrected) #> 0.9299181 #> Concordance Correlation Coefficient (weigthed) #> 0.9238419"},{"path":"https://serkor1.github.io/SLmetrics/reference/ckappa.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","title":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","text":"kappa()-function computes Cohen's \\(\\kappa\\), statistic measures inter-rater agreement categorical items two vectors predicted observed factor() values. 
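A small sketch of the concordance-correlation formula above, written out with base-R moments on the mtcars regression from the entry's example; the correction rescaling is left out, so this is an illustration rather than the package's implementation.
## sketch: rho_c = 2 * rho * sd_x * sd_y / (var_x + var_y + (mean_x - mean_y)^2)
actual    <- mtcars$mpg
predicted <- fitted(lm(mpg ~ ., data = mtcars))
rho <- cor(actual, predicted)
(2 * rho * sd(actual) * sd(predicted)) /
  (var(actual) + var(predicted) + (mean(actual) - mean(predicted))^2)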
weighted.ckappa() function computes weighted \\(\\kappa\\)-statistic. \\(\\beta \\neq 0\\) -diagonals confusion matrix penalized factor \\((y_{+} - y_{,-})^\\beta\\). See details.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ckappa.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","text":"","code":"# S3 method for class 'factor' ckappa(actual, predicted, beta = 0, ...) # S3 method for class 'factor' weighted.ckappa(actual, predicted, w, beta = 0, ...) # S3 method for class 'cmatrix' ckappa(x, beta = 0, ...) ckappa(...) weighted.ckappa(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/ckappa.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. beta value length 1 (default: 0). set value different zero, -diagonal confusion matrix penalized. ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ckappa.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ckappa.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","text":"$$ \\frac{\\rho_p - \\rho_e}{1-\\rho_e} $$ \\(\\rho_p\\) empirical probability agreement predicted actual values, \\(\\rho_e\\) expected probability agreement random chance.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ckappa.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
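The kappa formula above can be traced by hand with base R: observed and chance agreement are computed from the confusion table of the entry's toy factors (the beta penalty is not shown here).
## sketch: kappa = (p_observed - p_chance) / (1 - p_chance)
set.seed(1903)
actual <- factor(sample(1:3, 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
set.seed(1903)
predicted <- factor(sample(c(1, 3), 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
cm  <- table(actual, predicted)
p_o <- sum(diag(cm)) / sum(cm)                       # observed agreement
p_e <- sum(rowSums(cm) * colSums(cm)) / sum(cm)^2    # agreement expected by chance
(p_o - p_e) / (1 - p_e)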
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/ckappa.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate model performance with # Cohens Kappa statistic cat( \"Kappa\", ckappa( actual = actual, predicted = predicted ), \"Kappa (penalized)\", ckappa( actual = actual, predicted = predicted, beta = 2 ), \"Kappa (weigthed)\", weighted.ckappa( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ), sep = \"\\n\" ) #> Kappa #> 0.5628141 #> Kappa (penalized) #> 0.5628141 #> Kappa (weigthed) #> 0.4971626"},{"path":"https://serkor1.github.io/SLmetrics/reference/cmatrix.html","id":null,"dir":"Reference","previous_headings":"","what":"Confusion Matrix — cmatrix.factor","title":"Confusion Matrix — cmatrix.factor","text":"cmatrix()-function uses cross-classifying factors build confusion matrix counts combination factor levels. row matrix represents actual factor levels, column represents predicted factor levels.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/cmatrix.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Confusion Matrix — cmatrix.factor","text":"","code":"# S3 method for class 'factor' cmatrix(actual, predicted, ...) # S3 method for class 'factor' weighted.cmatrix(actual, predicted, w, ...) cmatrix(...) weighted.cmatrix(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/cmatrix.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Confusion Matrix — cmatrix.factor","text":"actual -vector length \\(n\\), \\(k\\) levels. predicted -vector length \\(n\\), \\(k\\) levels. ... Arguments passed methods. w -vector length \\(n\\) (default: NULL) passed return weighted confusion matrix.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/cmatrix.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Confusion Matrix — cmatrix.factor","text":"named \\(k\\) x \\(k\\) class","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/cmatrix.html","id":"dimensions","dir":"Reference","previous_headings":"","what":"Dimensions","title":"Confusion Matrix — cmatrix.factor","text":"robust defensive measure misspecififying confusion matrix. 
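For intuition only, the cross-classification that cmatrix() describes corresponds to a plain contingency table in base R (rows are the actual classes, columns the predicted ones); the cmatrix() object itself, with its summary() and plot() methods, is specific to the package.
## sketch: actual classes in rows, predicted classes in columns
set.seed(1903)
actual <- factor(sample(1:3, 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
set.seed(1903)
predicted <- factor(sample(c(1, 3), 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
table(actual = actual, predicted = predicted)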
arguments correctly specified, resulting confusion matrix form:","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/cmatrix.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Confusion Matrix — cmatrix.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) summarise performance # in a confusion matrix # 4.1) unweighted matrix confusion_matrix <- cmatrix( actual = actual, predicted = predicted ) # 4.1.1) summarise matrix summary( confusion_matrix ) #> Confusion Matrix (2 x 2) #> ================================================================================ #> Virginica Others #> Virginica 35 15 #> Others 14 86 #> ================================================================================ #> Overall Statistics (micro average) #> - Accuracy: 0.81 #> - Balanced Accuracy: 0.78 #> - Sensitivity: 0.81 #> - Specificity: 0.81 #> - Precision: 0.81 # 4.1.2) plot confusion # matrix plot( confusion_matrix ) # 4.2) weighted matrix confusion_matrix <- weighted.cmatrix( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) # 4.2.1) summarise matrix summary( confusion_matrix ) #> Confusion Matrix (2 x 2) #> ================================================================================ #> Virginica Others #> Virginica 53.40607 20.46301 #> Others 17.21660 58.91432 #> ================================================================================ #> Overall Statistics (micro average) #> - Accuracy: 0.75 #> - Balanced Accuracy: 0.75 #> - Sensitivity: 0.75 #> - Specificity: 0.75 #> - Precision: 0.75 # 4.2.1) plot confusion # matrix plot( confusion_matrix )"},{"path":"https://serkor1.github.io/SLmetrics/reference/dor.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","title":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","text":"dor()-function computes Diagnostic Odds Ratio (DOR), single indicator test performance, two vectors predicted observed factor() values. weighted.dor() function computes weighted diagnostic odds ratio. aggregate = TRUE, function returns micro-average DOR across classes \\(k\\). default, returns class-wise DOR.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/dor.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","text":"","code":"# S3 method for class 'factor' dor(actual, predicted, ...) # S3 method for class 'factor' weighted.dor(actual, predicted, w, ...) # S3 method for class 'cmatrix' dor(x, ...) dor(...) 
weighted.dor(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/dor.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/dor.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/dor.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","text":"metric calculated class \\(k\\) follows, $$ \\text{DOR}_k = \\frac{\\text{PLR}_k}{\\text{NLR}_k} $$ \\(\\text{PLR}_k\\) \\(\\text{NLR}_k\\) positive negative likelihood ratio class \\(k\\), respectively. See plr() nlr() details. aggregate = TRUE, micro-average calculated , $$ \\overline{\\text{DOR}} = \\frac{\\overline{\\text{PLR}_k}}{\\overline{\\text{NLR}_k}} $$ \\(\\overline{\\text{PLR}}\\) \\(\\overline{\\text{NLR}}\\) micro-averaged positive negative likelihood ratio, respectively.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/dor.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
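A small binary sketch of the diagnostic odds ratio formula above, with the positive and negative likelihood ratios written out by hand; the vectors are invented for illustration and the package's plr()/nlr() functions are not used.
## sketch: DOR = PLR / NLR, with PLR = TPR / FPR and NLR = FNR / TNR
actual    <- factor(c("pos", "pos", "pos", "neg", "neg", "neg", "pos", "neg"), levels = c("pos", "neg"))
predicted <- factor(c("pos", "pos", "neg", "neg", "neg", "pos", "pos", "neg"), levels = c("pos", "neg"))
tp <- sum(predicted == "pos" & actual == "pos"); fn <- sum(predicted == "neg" & actual == "pos")
fp <- sum(predicted == "pos" & actual == "neg"); tn <- sum(predicted == "neg" & actual == "neg")
plr <- (tp / (tp + fn)) / (fp / (fp + tn))
nlr <- (fn / (tp + fn)) / (tn / (fp + tn))
plr / nlr                                   # equals (tp * tn) / (fp * fn)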
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/dor.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate model performance # with Diagnostic Odds Ratio cat(\"Diagnostic Odds Ratio\", sep = \"\\n\") #> Diagnostic Odds Ratio dor( actual = actual, predicted = predicted ) #> [1] 14.33333 14.33333 cat(\"Diagnostic Odds Ratio (weighted)\", sep = \"\\n\") #> Diagnostic Odds Ratio (weighted) weighted.dor( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> [1] 8.930882 8.930882"},{"path":"https://serkor1.github.io/SLmetrics/reference/entropy.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the Entropy — entropy.matrix","title":"Compute the Entropy — entropy.matrix","text":"entropy() function calculates Entropy given probability distributions.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/entropy.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the Entropy — entropy.matrix","text":"","code":"# S3 method for class 'matrix' entropy(pk, axis = 0L, base = -1, ...) entropy(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/entropy.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the Entropy — entropy.matrix","text":"pk \\(n \\times k\\) -matrix predicted probabilities. \\(\\)-th row sum 1 (.e., valid probability distribution \\(k\\) classes). first column corresponds first factor level actual, second column second factor level, . axis value length 1 (Default: 0). Defines dimensions calculate entropy. 0: Total entropy, 1: row-wise, 2: column-wise base value length 1 (Default: -1). logarithmic base use. Default value specifies natural logarithms. ... Arguments passed methods","code":""},{"path":[]},{"path":[]},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/fbeta.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","title":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","text":"fbeta()-function computes \\(F_\\beta\\) score, weighted harmonic mean precision() recall(), two vectors predicted observed factor() values. parameter \\(\\beta\\) determines weight precision recall combined score. 
weighted.fbeta() function computes weighted \\(F_\\beta\\) score.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fbeta.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","text":"","code":"# S3 method for class 'factor' fbeta(actual, predicted, beta = 1, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.fbeta(actual, predicted, w, beta = 1, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' fbeta(x, beta = 1, micro = NULL, na.rm = TRUE, ...) fbeta(...) weighted.fbeta(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/fbeta.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. beta vector length \\(1\\) (default: \\(1\\)). micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fbeta.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fbeta.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","text":"metric calculated class \\(k\\) follows, $$ (1 + \\beta^2) \\frac{\\text{Precision}_k \\cdot \\text{Recall}_k}{(\\beta^2 \\cdot \\text{Precision}_k) + \\text{Recall}_k} $$ precision \\(\\frac{\\#TP_k}{\\#TP_k + \\#FP_k}\\) recall (sensitivity) \\(\\frac{\\#TP_k}{\\#TP_k + \\#FN_k}\\), \\(\\beta\\) determines weight precision relative recall.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fbeta.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
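The F_beta formula above can be traced class by class with base R: precision and recall come straight from the confusion table of the toy factors, and beta = 1 reproduces the F1 case; classes that are never predicted give NaN here, which is the situation the na.rm argument addresses.
## sketch: F_beta = (1 + beta^2) * precision * recall / (beta^2 * precision + recall)
set.seed(1903)
actual <- factor(sample(1:3, 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
set.seed(1903)
predicted <- factor(sample(c(1, 3), 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
cm   <- table(actual, predicted)
prec <- diag(cm) / colSums(cm)   # TP / (TP + FP)
rec  <- diag(cm) / rowSums(cm)   # TP / (TP + FN)
beta <- 1
(1 + beta^2) * prec * rec / (beta^2 * prec + rec)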
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/fbeta.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using F1-score # 4.1) unweighted F1-score fbeta( actual = actual, predicted = predicted, beta = 1 ) #> Virginica Others #> 0.7070707 0.8557214 # 4.2) weighted F1-score weighted.fbeta( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), beta = 1 ) #> Virginica Others #> 0.7392265 0.7577002 # 5) evaluate overall performance # using micro-averaged F1-score cat( \"Micro-averaged F1-score\", fbeta( actual = actual, predicted = predicted, beta = 1, micro = TRUE ), \"Micro-averaged F1-score (weighted)\", weighted.fbeta( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), beta = 1, micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged F1-score #> 0.8066667 #> Micro-averaged F1-score (weighted) #> 0.7488026"},{"path":"https://serkor1.github.io/SLmetrics/reference/fdr.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","title":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","text":"fdr()-function computes false discovery rate (FDR), proportion false positives among predicted positives, two vectors predicted observed factor() values. weighted.fdr() function computes weighted false discovery rate.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fdr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","text":"","code":"# S3 method for class 'factor' fdr(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.fdr(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' fdr(x, micro = NULL, na.rm = TRUE, ...) fdr(...) weighted.fdr(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/fdr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. 
predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fdr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fdr.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#FP_k}{\\#TP_k+\\#FP_k} $$ \\(\\#TP_k\\) \\(\\#FP_k\\) number true positives false positives, respectively, class \\(k\\).","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fdr.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
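Class-wise, the false discovery rate formula above amounts to the off-diagonal share of each predicted column; a base-R sketch using the entry's toy factors, shown for illustration only.
## sketch: FDR_k = FP_k / (TP_k + FP_k)
set.seed(1903)
actual <- factor(sample(1:3, 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
set.seed(1903)
predicted <- factor(sample(c(1, 3), 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
cm <- table(actual, predicted)
tp <- diag(cm)
fp <- colSums(cm) - tp
fp / (tp + fp)    # classes never predicted give NaN (0 / 0)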
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/fdr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using False Discovery Rate # 4.1) unweighted False Discovery Rate fdr( actual = actual, predicted = predicted ) #> Virginica Others #> 0.2857143 0.1485149 # 4.2) weighted False Discovery Rate weighted.fdr( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.2437830 0.2577942 # 5) evaluate overall performance # using micro-averaged False Discovery Rate cat( \"Micro-averaged False Discovery Rate\", fdr( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged False Discovery Rate (weighted)\", weighted.fdr( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged False Discovery Rate #> 0.1933333 #> Micro-averaged False Discovery Rate (weighted) #> 0.2511974"},{"path":"https://serkor1.github.io/SLmetrics/reference/fer.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","title":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","text":"fer()-function computes false omission rate (), proportion false negatives among predicted negatives, two vectors predicted observed factor() values. weighted.fer() function computes weighted false omission rate.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fer.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","text":"","code":"# S3 method for class 'factor' fer(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.fer(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' fer(x, micro = NULL, na.rm = TRUE, ...) fer(...) 
weighted.fer(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/fer.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fer.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fer.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#FN_k}{\\#FN_k + \\#TN_k} $$ \\(\\#FN_k\\) \\(\\#TN_k\\) number false negatives true negatives, respectively, class \\(k\\).","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fer.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
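The false omission rate mirrors the false discovery rate sketch, but on the predicted negatives; again a base-R illustration of the formula above, not the package's implementation.
## sketch: false omission rate, FN_k / (FN_k + TN_k)
set.seed(1903)
actual <- factor(sample(1:3, 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
set.seed(1903)
predicted <- factor(sample(c(1, 3), 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
cm <- table(actual, predicted)
tp <- diag(cm); fp <- colSums(cm) - tp; fn <- rowSums(cm) - tp
tn <- sum(cm) - tp - fp - fn
fn / (fn + tn)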
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/fer.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using False Omission Rate # 4.1) unweighted False Omission Rate fer( actual = actual, predicted = predicted ) #> Virginica Others #> 0.1485149 0.2857143 # 4.2) weighted False Omission Rate weighted.fer( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.2577942 0.2437830 # 5) evaluate overall performance # using micro-averaged False Omission Rate cat( \"Micro-averaged False Omission Rate\", fer( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged False Omission Rate (weighted)\", weighted.fer( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged False Omission Rate #> 0.1933333 #> Micro-averaged False Omission Rate (weighted) #> 0.2511974"},{"path":"https://serkor1.github.io/SLmetrics/reference/fmi.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","title":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","text":"fmi()-function computes Fowlkes-Mallows Index (FMI), measure similarity two sets clusterings, two vectors predicted observed factor() values.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fmi.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","text":"","code":"# S3 method for class 'factor' fmi(actual, predicted, ...) # S3 method for class 'cmatrix' fmi(x, ...) fmi(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/fmi.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","text":"actual vector - length \\(n\\), \\(k\\) levels predicted vector -vector length \\(n\\), \\(k\\) levels ... 
Arguments passed methods x confusion matrix created cmatrix()","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fmi.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","text":"-vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fmi.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","text":"metric calculated class \\(k\\) follows, $$ \\sqrt{\\frac{\\#TP_k}{\\#TP_k + \\#FP_k} \\times \\frac{\\#TP_k}{\\#TP_k + \\#FN_k}} $$ \\(\\#TP_k\\), \\(\\#FP_k\\), \\(\\#FN_k\\) represent number true positives, false positives, false negatives class \\(k\\), respectively.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fmi.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/fmi.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate model performance # using Fowlkes Mallows Index cat( \"Fowlkes Mallows Index\", fmi( actual = actual, predicted = predicted ), sep = \"\\n\" ) #> Fowlkes Mallows Index #> 0.717045"},{"path":"https://serkor1.github.io/SLmetrics/reference/fpr.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","title":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","text":"fpr()-function computes False Positive Rate (FPR), also known fall-(fallout()), two vectors predicted observed factor() values. 
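Going back to the Fowlkes-Mallows entry above, the index is the geometric mean of precision and recall; a minimal binary sketch with invented vectors, not the package call.
## sketch: FMI = sqrt( TP/(TP+FP) * TP/(TP+FN) )
actual    <- factor(c("pos", "pos", "pos", "neg", "neg", "neg"), levels = c("pos", "neg"))
predicted <- factor(c("pos", "pos", "neg", "neg", "pos", "neg"), levels = c("pos", "neg"))
tp <- sum(predicted == "pos" & actual == "pos")
fp <- sum(predicted == "pos" & actual == "neg")
fn <- sum(predicted == "neg" & actual == "pos")
sqrt((tp / (tp + fp)) * (tp / (tp + fn)))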
weighted.fpr() function computes weighted false positive rate.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fpr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","text":"","code":"# S3 method for class 'factor' fpr(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.fpr(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' fpr(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' fallout(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.fallout(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' fallout(x, micro = NULL, na.rm = TRUE, ...) fpr(...) fallout(...) weighted.fpr(...) weighted.fallout(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/fpr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fpr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fpr.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#FP_k}{\\#FP_k + \\#TN_k} $$ \\(\\#FP_k\\) \\(\\#TN_k\\) represent number false positives true negatives, respectively, class \\(k\\).","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fpr.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
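As with the other rates, the false positive rate formula above can be checked per class with base R; one minus this quantity is the specificity.
## sketch: FPR_k = FP_k / (FP_k + TN_k), i.e. 1 - specificity
set.seed(1903)
actual <- factor(sample(1:3, 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
set.seed(1903)
predicted <- factor(sample(c(1, 3), 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
cm <- table(actual, predicted)
tp <- diag(cm); fp <- colSums(cm) - tp; fn <- rowSums(cm) - tp
tn <- sum(cm) - tp - fp - fn
fp / (fp + tn)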
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/fpr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using False Positive Rate # 4.1) unweighted False Positive Rate fpr( actual = actual, predicted = predicted ) #> Virginica Others #> 0.14 0.30 # 4.2) weighted False Positive Rate weighted.fpr( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.2261447 0.2770173 # 5) evaluate overall performance # using micro-averaged False Positive Rate cat( \"Micro-averaged False Positive Rate\", fpr( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged False Positive Rate (weighted)\", weighted.fpr( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged False Positive Rate #> 0.1933333 #> Micro-averaged False Positive Rate (weighted) #> 0.2511974"},{"path":"https://serkor1.github.io/SLmetrics/reference/huberloss.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{huber}\\) \\(\\text{loss}\\) — huberloss.numeric","title":"Compute the \\(\\text{huber}\\) \\(\\text{loss}\\) — huberloss.numeric","text":"huberloss()-function computes simple weighted huber loss predicted observed vectors. weighted.huberloss() function computes weighted Huber Loss.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/huberloss.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{huber}\\) \\(\\text{loss}\\) — huberloss.numeric","text":"","code":"# S3 method for class 'numeric' huberloss(actual, predicted, delta = 1, ...) # S3 method for class 'numeric' weighted.huberloss(actual, predicted, w, delta = 1, ...) huberloss(...) weighted.huberloss(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/huberloss.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{huber}\\) \\(\\text{loss}\\) — huberloss.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. delta -vector length \\(1\\) (default: \\(1\\)). 
threshold value switch functions (see calculation). ... Arguments passed methods. w -vector length \\(n\\). weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/huberloss.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{huber}\\) \\(\\text{loss}\\) — huberloss.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/huberloss.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{huber}\\) \\(\\text{loss}\\) — huberloss.numeric","text":"metric calculated follows, $$ \\frac{1}{2} (y - \\upsilon)^2 ~~ |y - \\upsilon| \\leq \\delta $$ $$ \\delta |y-\\upsilon|-\\frac{1}{2} \\delta^2 ~~ \\text{otherwise} $$ \\(y\\) \\(\\upsilon\\) actual predicted values respectively. w NULL, values aggregated using weights.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/huberloss.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{huber}\\) \\(\\text{loss}\\) — huberloss.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) calculate the metric # with delta 0.5 huberloss( actual = actual, predicted = predicted, delta = 0.5 ) #> [1] 0.7503286 # 3) caclulate weighted # metric using arbitrary weights w <- rbeta( n = 1e3, shape1 = 10, shape2 = 2 ) huberloss( actual = actual, predicted = predicted, delta = 0.5, w = w ) #> [1] 0.7503286"},{"path":"https://serkor1.github.io/SLmetrics/reference/jaccard.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","title":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","text":"jaccard()-function computes Jaccard Index, also known Intersection Union, two vectors predicted observed factor() values. weighted.jaccard() function computes weighted Jaccard Index.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/jaccard.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","text":"","code":"# S3 method for class 'factor' jaccard(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.jaccard(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' jaccard(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' csi(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.csi(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' csi(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' tscore(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.tscore(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' tscore(x, micro = NULL, na.rm = TRUE, ...) jaccard(...) csi(...) tscore(...) weighted.jaccard(...) weighted.csi(...) 
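The piecewise Huber definition above is easy to trace in base R. Aggregating the element-wise losses with a plain mean is an assumption here (it is meant to match the unweighted case), and huber_sketch is an invented helper, not a package function.
## sketch: quadratic inside the delta band, linear outside, then averaged (assumed aggregation)
huber_sketch <- function(actual, predicted, delta = 1) {
  err <- abs(actual - predicted)
  mean(ifelse(err <= delta,
              0.5 * err^2,
              delta * err - 0.5 * delta^2))
}
huber_sketch(mtcars$mpg, fitted(lm(mpg ~ ., data = mtcars)), delta = 0.5)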
weighted.tscore(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/jaccard.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/jaccard.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/jaccard.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#TP_k}{\\#TP_k + \\#FP_k + \\#FN_k} $$ \\(\\#TP_k\\), \\(\\#FP_k\\), \\(\\#FN_k\\) represent number true positives, false positives, false negatives class \\(k\\), respectively.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/jaccard.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/jaccard.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using Jaccard Index # 4.1) unweighted Jaccard Index jaccard( actual = actual, predicted = predicted ) #> Virginica Others #> 0.5468750 0.7478261 # 4.2) weighted Jaccard Index weighted.jaccard( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.5863278 0.6099174 # 5) evaluate overall performance # using micro-averaged Jaccard Index cat( \"Micro-averaged Jaccard Index\", jaccard( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged Jaccard Index (weighted)\", weighted.jaccard( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged Jaccard Index #> 0.6759777 #> Micro-averaged Jaccard Index (weighted) #> 0.5984687"},{"path":"https://serkor1.github.io/SLmetrics/reference/logloss.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the Log Loss — logloss.factor","title":"Compute the Log Loss — logloss.factor","text":"logloss() function computes Log Loss observed classes () predicted probability distributions ( matrix). weighted.logloss() function weighted version, applying observation-specific weights.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/logloss.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the Log Loss — logloss.factor","text":"","code":"# S3 method for class 'factor' logloss(actual, response, normalize = TRUE, ...) # S3 method for class 'factor' weighted.logloss(actual, response, w, normalize = TRUE, ...) logloss(...) weighted.logloss(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/logloss.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the Log Loss — logloss.factor","text":"actual vector - length \\(n\\), \\(k\\) levels response \\(n \\times k\\) -matrix predicted probabilities. \\(\\)-th row sum 1 (.e., valid probability distribution \\(k\\) classes). first column corresponds first factor level actual, second column second factor level, . normalize -value (default: TRUE). 
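As a worked illustration of the class-wise Jaccard formula above, the sketch below rebuilds the binary iris example, tabulates the confusion counts with base R, and applies TP / (TP + FP + FN) per class next to jaccard(). The confusion-matrix orientation (actual classes in rows, predictions in columns) and the name manual_jaccard are conventions of this sketch, not part of the package.

library(SLmetrics)

# same binary recoding of iris as in the Jaccard examples
iris$species_num <- as.numeric(iris$Species == "virginica")
model <- glm(species_num ~ Sepal.Length + Sepal.Width, data = iris,
             family = binomial(link = "logit"))

predicted <- factor(as.numeric(predict(model, type = "response") > 0.5),
                    levels = c(1, 0), labels = c("Virginica", "Others"))
actual    <- factor(iris$species_num,
                    levels = c(1, 0), labels = c("Virginica", "Others"))

# confusion matrix: actual classes in rows, predictions in columns
cm <- table(actual, predicted)

# per-class counts
TP <- diag(cm)
FP <- colSums(cm) - TP
FN <- rowSums(cm) - TP

# documented formula, per class
manual_jaccard <- TP / (TP + FP + FN)
manual_jaccard

# class-wise output of the package function
jaccard(actual = actual, predicted = predicted)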
TRUE, mean cross-entropy across observations returned; otherwise, sum cross-entropies returned. ... Arguments passed methods w -vector length \\(n\\). NULL default","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/logloss.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the Log Loss — logloss.factor","text":"-vector length 1","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/logloss.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the Log Loss — logloss.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/logloss.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the Log Loss — logloss.factor","text":"","code":"# 1) Recode the iris data set to a binary classification problem # Here, the positive class (\"Virginica\") is coded as 1, # and the rest (\"Others\") is coded as 0. iris$species_num <- as.numeric(iris$Species == \"virginica\") # 2) Fit a logistic regression model predicting species_num from Sepal.Length & Sepal.Width model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial(link = \"logit\") ) # 3) Generate predicted classes: \"Virginica\" vs. \"Others\" predicted <- factor( as.numeric(predict(model, type = \"response\") > 0.5), levels = c(1, 0), labels = c(\"Virginica\", \"Others\") ) # 3.1) Generate actual classes actual <- factor( x = iris$species_num, levels = c(1, 0), labels = c(\"Virginica\", \"Others\") ) # For Log Loss, we need predicted probabilities for each class. # Since it's a binary model, we create a 2-column matrix: # 1st column = P(\"Virginica\") # 2nd column = P(\"Others\") = 1 - P(\"Virginica\") predicted_probs <- predict(model, type = \"response\") response_matrix <- cbind(predicted_probs, 1 - predicted_probs) # 4) Evaluate unweighted Log Loss # 'logloss' takes (actual, response_matrix, normalize=TRUE/FALSE). # The factor 'actual' must have the positive class (Virginica) as its first level. 
unweighted_LogLoss <- logloss( actual = actual, # factor response = response_matrix, # numeric matrix of probabilities normalize = TRUE # normalize = TRUE ) # 5) Evaluate weighted Log Loss # We introduce a weight vector, for example: weights <- iris$Petal.Length / mean(iris$Petal.Length) weighted_LogLoss <- weighted.logloss( actual = actual, response = response_matrix, w = weights, normalize = TRUE ) # 6) Print Results cat( \"Unweighted Log Loss:\", unweighted_LogLoss, \"Weighted Log Loss:\", weighted_LogLoss, sep = \"\\n\" ) #> Unweighted Log Loss: #> 0.3863304 #> Weighted Log Loss: #> 0.491474"},{"path":"https://serkor1.github.io/SLmetrics/reference/mae.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — mae.numeric","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — mae.numeric","text":"mae()-function computes mean absolute error observed predicted vectors. weighted.mae() function computes weighted mean absolute error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mae.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — mae.numeric","text":"","code":"# S3 method for class 'numeric' mae(actual, predicted, ...) # S3 method for class 'numeric' weighted.mae(actual, predicted, w, ...) mae(...) weighted.mae(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/mae.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — mae.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. w -vector length \\(n\\). 
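The sketch below recomputes the Log Loss of the example above by hand: for every observation it looks up the predicted probability of the observed class in the response matrix and averages the negative log of those probabilities. It relies on the documented convention that the first column of the response matrix corresponds to the first factor level of actual; the objects idx and p_actual are illustrative names.

library(SLmetrics)

# binary iris setup as in the Log Loss example
iris$species_num <- as.numeric(iris$Species == "virginica")
model <- glm(species_num ~ Sepal.Length + Sepal.Width, data = iris,
             family = binomial(link = "logit"))

actual <- factor(iris$species_num, levels = c(1, 0),
                 labels = c("Virginica", "Others"))

# response matrix: column 1 = P(Virginica), column 2 = P(Others)
p <- predict(model, type = "response")
response_matrix <- cbind(p, 1 - p)

# pick, for every observation, the probability assigned to its observed class
idx      <- cbind(seq_along(actual), as.integer(actual))
p_actual <- response_matrix[idx]

# mean cross-entropy (normalize = TRUE) versus the sum (normalize = FALSE)
manual_mean <- -mean(log(p_actual))
manual_sum  <- -sum(log(p_actual))

all.equal(manual_mean, logloss(actual, response_matrix, normalize = TRUE))
all.equal(manual_sum,  logloss(actual, response_matrix, normalize = FALSE))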
weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mae.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — mae.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mae.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — mae.numeric","text":"metric calulated follows, $$ \\frac{\\sum_i^n |y_i - \\upsilon_i|}{n} $$","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/mae.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — mae.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Mean Absolute Error (MAE) cat( \"Mean Absolute Error\", mae( actual = actual, predicted = predicted, ), \"Mean Absolute Error (weighted)\", weighted.mae( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Mean Absolute Error #> 1.72274 #> Mean Absolute Error (weighted) #> 1.849613"},{"path":"https://serkor1.github.io/SLmetrics/reference/mape.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mape.numeric","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mape.numeric","text":"mape()-function computes mean absolute percentage error observed predicted vectors. weighted.mape() function computes weighted mean absolute percentage error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mape.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mape.numeric","text":"","code":"# S3 method for class 'numeric' mape(actual, predicted, ...) # S3 method for class 'numeric' weighted.mape(actual, predicted, w, ...) mape(...) weighted.mape(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/mape.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mape.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. w -vector length \\(n\\). 
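A minimal cross-check of the mean absolute error entry above, using the same mtcars fit as the examples; the weighted variant is assumed here to be a weight-normalised mean of the absolute errors.

library(SLmetrics)

# the mtcars fit used throughout the regression examples
model     <- lm(mpg ~ ., data = mtcars)
actual    <- mtcars$mpg
predicted <- fitted(model)
w         <- mtcars$mpg / mean(mtcars$mpg)

# documented formula: sum(|y - v|) / n
manual_mae <- sum(abs(actual - predicted)) / length(actual)
all.equal(manual_mae, mae(actual, predicted))

# weighted version, assumed to be a weight-normalised mean
manual_wmae <- sum(w * abs(actual - predicted)) / sum(w)
all.equal(manual_wmae, weighted.mae(actual, predicted, w = w))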
weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mape.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mape.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mape.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mape.numeric","text":"metric calculated , $$ \\frac{1}{n} \\sum_i^n \\frac{|y_i - \\upsilon_i|}{|y_i|} $$","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/mape.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mape.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Mean Absolute Percentage Error (MAPE) cat( \"Mean Absolute Percentage Error\", mape( actual = actual, predicted = predicted, ), \"Mean Absolute Percentage Error (weighted)\", weighted.mape( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Mean Absolute Percentage Error #> 0.08776196 #> Mean Absolute Percentage Error (weighted) #> 0.08574846"},{"path":"https://serkor1.github.io/SLmetrics/reference/mcc.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","title":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","text":"mcc()-function computes Matthews Correlation Coefficient (MCC), also known \\(\\phi\\)-coefficient, two vectors predicted observed factor() values. weighted.mcc() function computes weighted Matthews Correlation Coefficient.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mcc.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","text":"","code":"# S3 method for class 'factor' mcc(actual, predicted, ...) # S3 method for class 'factor' weighted.mcc(actual, predicted, w, ...) # S3 method for class 'cmatrix' mcc(x, ...) # S3 method for class 'factor' phi(actual, predicted, ...) # S3 method for class 'factor' weighted.phi(actual, predicted, w, ...) # S3 method for class 'cmatrix' phi(x, ...) mcc(...) weighted.mcc(...) phi(...) weighted.phi(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/mcc.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","text":"actual vector - length \\(n\\), \\(k\\) levels predicted vector -vector length \\(n\\), \\(k\\) levels ... Arguments passed methods w -vector length \\(n\\). 
NULL default x confusion matrix created cmatrix()","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mcc.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","text":"-vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mcc.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","text":"metric calculated follows, $$ \\frac{\\#TP \\times \\#TN - \\#FP \\times \\#FN}{\\sqrt{(\\#TP + \\#FP)(\\#TP + \\#FN)(\\#TN + \\#FP)(\\#TN + \\#FN)}} $$","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mcc.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/mcc.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate performance # using Matthews Correlation Coefficient cat( \"Matthews Correlation Coefficient\", mcc( actual = actual, predicted = predicted ), \"Matthews Correlation Coefficient (weighted)\", weighted.mcc( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ), sep = \"\\n\" ) #> Matthews Correlation Coefficient #> 0.562878 #> Matthews Correlation Coefficient (weighted) #> 0.4976298"},{"path":"https://serkor1.github.io/SLmetrics/reference/mpe.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{mean}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mpe.numeric","title":"Compute the \\(\\text{mean}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mpe.numeric","text":"mpe()-function computes mean percentage error observed predicted vectors. 
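To connect the MCC formula above with the binary iris example, the sketch below reads the four cell counts off a 2 x 2 confusion matrix and plugs them into the documented expression; the table orientation and the name manual_mcc are conventions of this sketch.

library(SLmetrics)

# binary iris setup as in the MCC example
iris$species_num <- as.numeric(iris$Species == "virginica")
model <- glm(species_num ~ Sepal.Length + Sepal.Width, data = iris,
             family = binomial(link = "logit"))

predicted <- factor(as.numeric(predict(model, type = "response") > 0.5),
                    levels = c(1, 0), labels = c("Virginica", "Others"))
actual    <- factor(iris$species_num, levels = c(1, 0),
                    labels = c("Virginica", "Others"))

# 2 x 2 confusion matrix: actual in rows, predicted in columns
cm <- table(actual, predicted)
TP <- as.numeric(cm[1, 1]); FN <- as.numeric(cm[1, 2])
FP <- as.numeric(cm[2, 1]); TN <- as.numeric(cm[2, 2])

# documented formula
manual_mcc <- (TP * TN - FP * FN) /
  sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))

all.equal(manual_mcc, mcc(actual, predicted))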
weighted.mpe() function computes weighted mean percentage error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mpe.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{mean}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mpe.numeric","text":"","code":"# S3 method for class 'numeric' mpe(actual, predicted, ...) # S3 method for class 'numeric' weighted.mpe(actual, predicted, w, ...) mpe(...) weighted.mpe(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/mpe.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{mean}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mpe.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. w -vector length \\(n\\). weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mpe.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{mean}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mpe.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mpe.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{mean}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mpe.numeric","text":"metric calculated , $$ \\frac{1}{n} \\sum_i^n \\frac{y_i - \\upsilon_i}{y_i} $$ \\(y_i\\) \\(\\upsilon_i\\) actual predicted values respectively.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/mpe.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{mean}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mpe.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Mean Percentage Error (MPE) cat( \"Mean Percentage Error\", mpe( actual = actual, predicted = predicted, ), \"Mean Percentage Error (weighted)\", weighted.mpe( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Mean Percentage Error #> -0.008569118 #> Mean Percentage Error (weighted) #> 1.734723e-18"},{"path":"https://serkor1.github.io/SLmetrics/reference/mse.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — mse.numeric","title":"Compute the \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — mse.numeric","text":"mse()-function computes mean squared error observed predicted vectors. weighted.mse() function computes weighted mean squared error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mse.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — mse.numeric","text":"","code":"# S3 method for class 'numeric' mse(actual, predicted, ...) # S3 method for class 'numeric' weighted.mse(actual, predicted, w, ...) mse(...) 
weighted.mse(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/mse.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — mse.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. w -vector length \\(n\\). weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mse.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — mse.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mse.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — mse.numeric","text":"metric calculated , $$ \\frac{1}{n} \\sum_i^n (y_i - \\upsilon_i)^2 $$ \\(y_i\\) \\(\\upsilon_i\\) actual predicted values respectively.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/mse.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — mse.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Mean Squared Error (MSE) cat( \"Mean Squared Error\", mse( actual = actual, predicted = predicted, ), \"Mean Squared Error (weighted)\", weighted.mse( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Mean Squared Error #> 4.609201 #> Mean Squared Error (weighted) #> 5.283426"},{"path":"https://serkor1.github.io/SLmetrics/reference/nlr.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","title":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","text":"nlr()-function computes negative likelihood ratio, also known likelihood ratio negative results, two vectors predicted observed factor() values. weighted.nlr() function computes weighted negative likelihood ratio.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/nlr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","text":"","code":"# S3 method for class 'factor' nlr(actual, predicted, ...) # S3 method for class 'factor' weighted.nlr(actual, predicted, w, ...) # S3 method for class 'cmatrix' nlr(x, ...) nlr(...) weighted.nlr(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/nlr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. ... Arguments passed methods w -vector length \\(n\\). NULL default. 
x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/nlr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/nlr.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{1 - \\text{Sensitivity}_k}{\\text{Specificity}_k} $$ sensitivity (true positive rate) calculated \\(\\frac{\\#TP_k}{\\#TP_k + \\#FN_k}\\) specificity (true negative rate) calculated \\(\\frac{\\#TN_k}{\\#TN_k + \\#FP_k}\\).","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/nlr.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/nlr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate model performance # with class-wise negative likelihood ratios cat(\"Negative Likelihood Ratio\", sep = \"\\n\") #> Negative Likelihood Ratio nlr( actual = actual, predicted = predicted ) #> [1] 0.3488372 0.2000000 cat(\"Negative Likelihood Ratio (weighted)\", sep = \"\\n\") #> Negative Likelihood Ratio (weighted) weighted.nlr( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> [1] 0.3579704 0.3127941"},{"path":"https://serkor1.github.io/SLmetrics/reference/npv.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","title":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","text":"npv()-function computes 
negative predictive value, also known True Negative Predictive Value, two vectors predicted observed factor() values. weighted.npv() function computes weighted negative predictive value.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/npv.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","text":"","code":"# S3 method for class 'factor' npv(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.npv(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' npv(x, micro = NULL, na.rm = TRUE, ...) npv(...) weighted.npv(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/npv.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/npv.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/npv.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#TN_k}{\\#TN_k + \\#FN_k} $$ \\(\\#TN_k\\) \\(\\#FN_k\\) number true negatives false negatives, respectively, class \\(k\\).","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/npv.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
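The class-wise negative likelihood ratio entry above can be reproduced from raw confusion counts. The sketch below derives sensitivity and specificity per class and applies (1 - sensitivity) / specificity, printed next to nlr(); the per-class bookkeeping of TP, FP, FN and TN is an illustrative construction, not package code.

library(SLmetrics)

# binary iris setup as in the negative likelihood ratio example
iris$species_num <- as.numeric(iris$Species == "virginica")
model <- glm(species_num ~ Sepal.Length + Sepal.Width, data = iris,
             family = binomial(link = "logit"))

predicted <- factor(as.numeric(predict(model, type = "response") > 0.5),
                    levels = c(1, 0), labels = c("Virginica", "Others"))
actual    <- factor(iris$species_num, levels = c(1, 0),
                    labels = c("Virginica", "Others"))

cm <- table(actual, predicted)     # actual in rows, predicted in columns
TP <- diag(cm)
FP <- colSums(cm) - TP
FN <- rowSums(cm) - TP
TN <- sum(cm) - TP - FP - FN

sens <- TP / (TP + FN)             # true positive rate, per class
spec <- TN / (TN + FP)             # true negative rate, per class

# documented formula: (1 - sensitivity) / specificity, per class
manual_nlr <- (1 - sens) / spec
manual_nlr

nlr(actual = actual, predicted = predicted)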
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/npv.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using Negative Predictive Value # 4.1) unweighted Negative Predictive Value npv( actual = actual, predicted = predicted ) #> Virginica Others #> 0.8514851 0.7142857 # 4.2) weighted Negative Predictive Value weighted.npv( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.7422058 0.7562170 # 5) evaluate overall performance # using micro-averaged Negative Predictive Value cat( \"Micro-averaged Negative Predictive Value\", npv( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged Negative Predictive Value (weighted)\", weighted.npv( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged Negative Predictive Value #> 0.8066667 #> Micro-averaged Negative Predictive Value (weighted) #> 0.7488026"},{"path":"https://serkor1.github.io/SLmetrics/reference/pinball.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{pinball}\\) \\(\\text{loss}\\) — pinball.numeric","title":"Compute the \\(\\text{pinball}\\) \\(\\text{loss}\\) — pinball.numeric","text":"pinball()-function computes pinball loss observed predicted vectors. weighted.pinball() function computes weighted Pinball Loss.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/pinball.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{pinball}\\) \\(\\text{loss}\\) — pinball.numeric","text":"","code":"# S3 method for class 'numeric' pinball(actual, predicted, alpha = 0.5, deviance = FALSE, ...) # S3 method for class 'numeric' weighted.pinball(actual, predicted, w, alpha = 0.5, deviance = FALSE, ...) pinball(...) weighted.pinball(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/pinball.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{pinball}\\) \\(\\text{loss}\\) — pinball.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. 
alpha -value length \\(1\\) (default: \\(0.5\\)). slope pinball loss function. deviance -value length 1 (default: FALSE). TRUE function returns \\(D^2\\) loss. ... Arguments passed methods. w -vector length \\(n\\). weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/pinball.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{pinball}\\) \\(\\text{loss}\\) — pinball.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/pinball.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{pinball}\\) \\(\\text{loss}\\) — pinball.numeric","text":"metric calculated , $$\\text{PinballLoss}_{\\text{unweighted}} = \\frac{1}{n} \\sum_{=1}^{n} \\left[ \\alpha \\cdot \\max(0, y_i - \\hat{y}_i) - (1 - \\alpha) \\cdot \\max(0, \\hat{y}_i - y_i) \\right]$$ \\(y_i\\) actual value, \\(\\hat{y}_i\\) predicted value \\(\\alpha\\) quantile level.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/pinball.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{pinball}\\) \\(\\text{loss}\\) — pinball.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Pinball Loss cat( \"Pinball Loss\", pinball( actual = actual, predicted = predicted, ), \"Pinball Loss (weighted)\", weighted.pinball( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Pinball Loss #> 0.8613701 #> Pinball Loss (weighted) #> 0.9248066"},{"path":"https://serkor1.github.io/SLmetrics/reference/plr.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","title":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","text":"plr()-function computes positive likelihood ratio, also known likelihood ratio positive results, two vectors predicted observed factor() values. weighted.plr() function computes weighted positive likelihood ratio.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/plr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","text":"","code":"# S3 method for class 'factor' plr(actual, predicted, ...) # S3 method for class 'factor' weighted.plr(actual, predicted, w, ...) # S3 method for class 'cmatrix' plr(x, ...) plr(...) weighted.plr(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/plr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. ... Arguments passed methods w -vector length \\(n\\). NULL default. 
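As a sanity check on the pinball loss entry above, the sketch below computes the quantile loss in its standard form, alpha times the positive part of the error plus (1 - alpha) times the negative part, averaged over the observations; with alpha = 0.5 this is exactly half the mean absolute error. The helper manual_pinball() is illustrative and uses the default alpha = 0.5 of pinball().

library(SLmetrics)

model     <- lm(mpg ~ ., data = mtcars)
actual    <- mtcars$mpg
predicted <- fitted(model)

# standard pinball (quantile) loss at level alpha
manual_pinball <- function(actual, predicted, alpha = 0.5) {
  error <- actual - predicted
  mean(alpha * pmax(0, error) + (1 - alpha) * pmax(0, -error))
}

# with alpha = 0.5 the pinball loss is half the mean absolute error
all.equal(manual_pinball(actual, predicted, alpha = 0.5),
          0.5 * mae(actual, predicted))

# and should agree with pinball() at its default alpha = 0.5
all.equal(manual_pinball(actual, predicted, alpha = 0.5),
          pinball(actual = actual, predicted = predicted))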
x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/plr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/plr.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\text{Sensitivity}_k}{1 - \\text{Specificity}_k} $$ sensitivity (true positive rate) calculated \\(\\frac{\\#TP_k}{\\#TP_k + \\#FN_k}\\) specificity (true negative rate) calculated \\(\\frac{\\#TN_k}{\\#TN_k + \\#FP_k}\\). aggregate = TRUE, micro-average calculated, $$ \\frac{\\sum_{k=1}^k \\text{Sensitivity}_k}{1 - \\sum_{k=1}^k \\text{Specificity}_k} $$","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/plr.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/plr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate model performance # with class-wise positive likelihood ratios cat(\"Positive Likelihood Ratio\", sep = \"\\n\") #> Positive Likelihood Ratio plr( actual = actual, predicted = predicted ) #> [1] 5.000000 2.866667 cat(\"Positive Likelihood Ratio (weighted)\", sep = \"\\n\") #> Positive Likelihood Ratio (weighted) weighted.plr( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> [1] 3.196992 2.793527"},{"path":"https://serkor1.github.io/SLmetrics/reference/prROC.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — 
prROC.factor","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — prROC.factor","text":"prROC()-function computes precision() recall() thresholds provided \\(response\\)- \\(thresholds\\)-vector. function constructs data.frame() grouped \\(k\\)-classes class treated binary classification problem.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/prROC.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — prROC.factor","text":"","code":"# S3 method for class 'factor' prROC(actual, response, thresholds = NULL, ...) # S3 method for class 'factor' weighted.prROC(actual, response, w, thresholds = NULL, ...) prROC(...) weighted.prROC(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/prROC.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — prROC.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. response -vector length \\(n\\). estimated response probabilities. thresholds optional -vector non-zero length (default: NULL). ... Arguments passed methods. w -vector length \\(n\\). NULL default.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/prROC.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — prROC.factor","text":"data.frame following form, threshold Thresholds used determine recall() precision() level level actual label levels actual recall recall precision precision","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/prROC.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — prROC.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":"https://serkor1.github.io/SLmetrics/reference/prROC.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — prROC.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#TN_k}{\\#TN_k+\\#FP_k} $$ \\(\\#TN_k\\) \\(\\#FP_k\\) number true negatives false positives, respectively, class \\(k\\).","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/prROC.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — prROC.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes response <- predict(model, type = \"response\") # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) generate reciever # operator characteristics roc <- prROC( actual = actual, response = response ) # 5) plot by species plot(roc) # 5.1) summarise summary(roc) #> Reciever Operator Characteristics #> ================================================================================ #> AUC #> - Others: 0.473 #> - Virginica: 0.775 # 6) provide custom # threholds roc <- prROC( actual = actual, response = response, thresholds = seq(0, 1, length.out = 4) ) # 5) plot by species plot(roc)"},{"path":"https://serkor1.github.io/SLmetrics/reference/precision.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","title":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","text":"precision()-function computes precision, also known positive predictive value (PPV), two vectors predicted observed factor() values. weighted.precision() function computes weighted precision.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/precision.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","text":"","code":"# S3 method for class 'factor' precision(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.precision(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' precision(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' ppv(actual, predicted, micro = NULL, na.rm = TRUE, ...) 
# S3 method for class 'factor' weighted.ppv(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' ppv(x, micro = NULL, na.rm = TRUE, ...) precision(...) weighted.precision(...) ppv(...) weighted.ppv(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/precision.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/precision.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/precision.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#TP_k}{\\#TP_k + \\#FP_k} $$ \\(\\#TP_k\\) \\(\\#FP_k\\) number true positives false positives, respectively, class \\(k\\).","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/precision.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
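To illustrate how a precision-recall curve like the one returned by prROC() above is constructed, the sketch below sweeps a small set of thresholds for the Virginica class of that example and computes recall and precision at each cut-off. The one-vs-rest bookkeeping and the objects pr_curve and thresholds are illustrative, and the exact per-class handling inside prROC() may differ; precision is NaN at a threshold where nothing is predicted positive.

library(SLmetrics)

# binary iris setup as in the prROC example
iris$species_num <- as.numeric(iris$Species == "virginica")
model <- glm(species_num ~ Sepal.Length + Sepal.Width, data = iris,
             family = binomial(link = "logit"))

actual   <- factor(iris$species_num, levels = c(1, 0),
                   labels = c("Virginica", "Others"))
response <- predict(model, type = "response")

# at each threshold, classify response > t as "Virginica" and
# compute recall and precision from the resulting counts
thresholds <- seq(0, 1, length.out = 4)

pr_curve <- do.call(rbind, lapply(thresholds, function(t) {
  pred_pos <- response > t
  TP <- sum(pred_pos  & actual == "Virginica")
  FP <- sum(pred_pos  & actual == "Others")
  FN <- sum(!pred_pos & actual == "Virginica")
  data.frame(
    threshold = t,
    recall    = TP / (TP + FN),
    precision = TP / (TP + FP)
  )
}))
pr_curve

# the grouped data.frame returned by prROC() over the same thresholds
roc <- prROC(actual = actual, response = response,
             thresholds = thresholds)
head(roc)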
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/precision.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using Precision # 4.1) unweighted Precision precision( actual = actual, predicted = predicted ) #> Virginica Others #> 0.7142857 0.8514851 # 4.2) weighted Precision weighted.precision( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.7562170 0.7422058 # 5) evaluate overall performance # using micro-averaged Precision cat( \"Micro-averaged Precision\", precision( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged Precision (weighted)\", weighted.precision( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged Precision #> 0.8066667 #> Micro-averaged Precision (weighted) #> 0.7488026"},{"path":"https://serkor1.github.io/SLmetrics/reference/rae.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{relative}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — rae.numeric","title":"Compute the \\(\\text{relative}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — rae.numeric","text":"rae()-function calculates normalized relative absolute error predicted observed vectors. weighted.rae() function computes weigthed relative absolute error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rae.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{relative}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — rae.numeric","text":"","code":"# S3 method for class 'numeric' rae(actual, predicted, ...) # S3 method for class 'numeric' weighted.rae(actual, predicted, w, ...) rae(...) weighted.rae(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/rae.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{relative}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — rae.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. 
w -vector length \\(n\\). weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rae.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{relative}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — rae.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rae.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{relative}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — rae.numeric","text":"Relative Absolute Error (RAE) calculated : $$ \\text{RAE} = \\frac{\\sum_{=1}^n |y_i - \\upsilon_i|}{\\sum_{=1}^n |y_i - \\bar{y}|} $$ \\(y_i\\) actual values, \\(\\upsilon_i\\) predicted values, \\(\\bar{y}\\) mean actual values.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/rae.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{relative}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — rae.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Relative Absolute Error (RAE) cat( \"Relative Absolute Error\", rae( actual = actual, predicted = predicted, ), \"Relative Absolute Error (weighted)\", weighted.rae( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Relative Absolute Error #> 0.3654168 #> Relative Absolute Error (weighted) #> 0.363789"},{"path":"https://serkor1.github.io/SLmetrics/reference/recall.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","title":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","text":"recall()-function computes recall, also known sensitivity True Positive Rate (TPR), two vectors predicted observed factor() values. weighted.recall() function computes weighted recall.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/recall.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","text":"","code":"# S3 method for class 'factor' recall(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.recall(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' recall(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' sensitivity(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.sensitivity(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' sensitivity(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' tpr(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.tpr(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' tpr(x, micro = NULL, na.rm = TRUE, ...) recall(...) sensitivity(...) tpr(...) weighted.recall(...) weighted.sensitivity(...) 
weighted.tpr(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/recall.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/recall.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/recall.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#TP_k}{\\#TP_k + \\#FN_k} $$ \\(\\#TP_k\\) \\(\\#FN_k\\) number true positives false negatives, respectively, class \\(k\\).","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/recall.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/recall.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using Recall # 4.1) unweighted Recall recall( actual = actual, predicted = predicted ) #> Virginica Others #> 0.70 0.86 # 4.2) weighted Recall weighted.recall( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.7229827 0.7738553 # 5) evaluate overall performance # using micro-averaged Recall cat( \"Micro-averaged Recall\", recall( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged Recall (weighted)\", weighted.recall( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged Recall #> 0.8066667 #> Micro-averaged Recall (weighted) #> 0.7488026"},{"path":"https://serkor1.github.io/SLmetrics/reference/rmse.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rmse.numeric","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rmse.numeric","text":"rmse()-function computes root mean squared error observed predicted vectors. weighted.rmse() function computes weighted root mean squared error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rmse.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rmse.numeric","text":"","code":"# S3 method for class 'numeric' rmse(actual, predicted, ...) # S3 method for class 'numeric' weighted.rmse(actual, predicted, w, ...) rmse(...) weighted.rmse(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/rmse.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rmse.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. 
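The class-wise recall defined in the recall() entry above, #TP_k / (#TP_k + #FN_k), can be reproduced directly from a plain contingency table; a minimal sketch reusing the A/B/C factors from that entry, assuming SLmetrics is installed.

# recreate the A/B/C example factors from the entry above
set.seed(1903)
actual    <- factor(sample(1:3, 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
set.seed(1903)
predicted <- factor(sample(c(1, 3), 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))

# rows are actual classes, columns are predicted classes
cm <- table(actual, predicted)

# TP_k / (TP_k + FN_k), one value per class
diag(cm) / rowSums(cm)

# compare with the package implementation
SLmetrics::recall(actual, predicted)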
w -vector length \\(n\\). weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rmse.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rmse.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rmse.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rmse.numeric","text":"metric calculated , $$ \\sqrt{\\frac{1}{n} \\sum_i^n (y_i - \\upsilon_i)^2} $$ \\(y_i\\) \\(\\upsilon_i\\) actual predicted values respectively.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/rmse.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rmse.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Root Mean Squared Error (RMSE) cat( \"Root Mean Squared Error\", rmse( actual = actual, predicted = predicted, ), \"Root Mean Squared Error (weighted)\", weighted.rmse( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Root Mean Squared Error #> 2.146905 #> Root Mean Squared Error (weighted) #> 2.29857"},{"path":"https://serkor1.github.io/SLmetrics/reference/rmsle.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{logarithmic}\\) \\(\\text{error}\\) — rmsle.numeric","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{logarithmic}\\) \\(\\text{error}\\) — rmsle.numeric","text":"rmsle()-function computes root mean squared logarithmic error observed predicted vectors. weighted.rmsle() function computes weighted root mean squared logarithmic error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rmsle.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{logarithmic}\\) \\(\\text{error}\\) — rmsle.numeric","text":"","code":"# S3 method for class 'numeric' rmsle(actual, predicted, ...) # S3 method for class 'numeric' weighted.rmsle(actual, predicted, w, ...) rmsle(...) weighted.rmsle(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/rmsle.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{logarithmic}\\) \\(\\text{error}\\) — rmsle.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. w -vector length \\(n\\). 
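The RMSE formula in the rmse() entry above is easy to verify by hand; a minimal sketch on the same mtcars example, assuming SLmetrics is installed.

# in-sample predictions, as in the rmse() example above
actual    <- mtcars$mpg
predicted <- fitted(lm(mpg ~ ., data = mtcars))

# square root of the mean squared error, as in the Calculation section
manual_rmse <- sqrt(mean((actual - predicted)^2))

# should agree with the package implementation (2.146905 in the example output)
all.equal(manual_rmse, SLmetrics::rmse(actual, predicted))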
weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rmsle.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{logarithmic}\\) \\(\\text{error}\\) — rmsle.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rmsle.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{logarithmic}\\) \\(\\text{error}\\) — rmsle.numeric","text":"metric calculated , $$ \\sqrt{\\frac{1}{n} \\sum_i^n (\\log(1 + y_i) - \\log(1 + \\upsilon_i))^2} $$ \\(y_i\\) \\(\\upsilon_i\\) actual predicted values respectively.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/rmsle.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{logarithmic}\\) \\(\\text{error}\\) — rmsle.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Root Mean Squared Logarithmic Error (RMSLE) cat( \"Root Mean Squared Logarithmic Error\", rmsle( actual = actual, predicted = predicted, ), \"Root Mean Squared Logarithmic Error (weighted)\", weighted.rmsle( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Root Mean Squared Logarithmic Error #> 0.1055744 #> Root Mean Squared Logarithmic Error (weighted) #> 0.1025173"},{"path":"https://serkor1.github.io/SLmetrics/reference/rrmse.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{relative}\\) \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrmse.numeric","title":"Compute the \\(\\text{relative}\\) \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrmse.numeric","text":"rrmse()-function computes Relative Root Mean Squared Error observed predicted vectors. weighted.rrmse() function computes weighted Relative Root Mean Squared Error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rrmse.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{relative}\\) \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrmse.numeric","text":"","code":"# S3 method for class 'numeric' rrmse(actual, predicted, normalization = 1L, ...) # S3 method for class 'numeric' weighted.rrmse(actual, predicted, w, normalization = 1L, ...) rrmse(...) weighted.rrmse(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/rrmse.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{relative}\\) \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrmse.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. normalization -value length \\(1\\) (default: \\(1\\)). \\(0\\): mean-normalization, \\(1\\): range-normalization, \\(2\\): IQR-normalization. ... Arguments passed methods. w -vector length \\(n\\). 
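Likewise, the RMSLE formula in the rmsle() entry above can be checked with a few lines of base R on the same mtcars example, assuming SLmetrics is installed.

actual    <- mtcars$mpg
predicted <- fitted(lm(mpg ~ ., data = mtcars))

# sqrt(mean((log(1 + y) - log(1 + v))^2)), as in the Calculation section
manual_rmsle <- sqrt(mean((log1p(actual) - log1p(predicted))^2))

# should agree with the package implementation (0.1055744 in the example output)
all.equal(manual_rmsle, SLmetrics::rmsle(actual, predicted))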
weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rrmse.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{relative}\\) \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrmse.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rrmse.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{relative}\\) \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrmse.numeric","text":"metric calculated , $$ \\frac{RMSE}{\\gamma} $$ \\(\\gamma\\) normalization factor.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/rrmse.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{relative}\\) \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrmse.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Relative Root Mean Squared Error (RRMSE) cat( \"IQR Relative Root Mean Squared Error\", rrmse( actual = actual, predicted = predicted, normalization = 2 ), \"IQR Relative Root Mean Squared Error (weighted)\", weighted.rrmse( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg), normalization = 2 ), sep = \"\\n\" ) #> IQR Relative Root Mean Squared Error #> 0.2911058 #> IQR Relative Root Mean Squared Error (weighted) #> 0.2642035"},{"path":"https://serkor1.github.io/SLmetrics/reference/rrse.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{root}\\) \\(\\text{relative}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrse.numeric","title":"Compute the \\(\\text{root}\\) \\(\\text{relative}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrse.numeric","text":"rrse()-function calculates root relative squared error predicted observed vectors. weighted.rrse() function computes weighed root relative squared errorr.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rrse.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{root}\\) \\(\\text{relative}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrse.numeric","text":"","code":"# S3 method for class 'numeric' rrse(actual, predicted, ...) # S3 method for class 'numeric' weighted.rrse(actual, predicted, w, ...) rrse(...) weighted.rrse(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/rrse.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{root}\\) \\(\\text{relative}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrse.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. w -vector length \\(n\\). 
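The RRMSE in the rrmse() entry above is just the RMSE divided by a normalization factor gamma; a minimal sketch of the three documented normalization options (0 = mean, 1 = range, 2 = IQR) on the mtcars example. How gamma is computed internally (for instance which quantile type the IQR uses) is an assumption here, so treat the manual values as illustrative.

actual    <- mtcars$mpg
predicted <- fitted(lm(mpg ~ ., data = mtcars))
rmse_val  <- sqrt(mean((actual - predicted)^2))

rmse_val / mean(actual)          # normalization = 0: mean
rmse_val / diff(range(actual))   # normalization = 1: range
rmse_val / IQR(actual)           # normalization = 2: IQR

# the IQR-normalized value should be close to the example output (0.2911058)
SLmetrics::rrmse(actual, predicted, normalization = 2)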
weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rrse.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{root}\\) \\(\\text{relative}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrse.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rrse.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{root}\\) \\(\\text{relative}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrse.numeric","text":"metric calculated , $$ \\text{RRSE} = \\sqrt{\\frac{\\sum_{=1}^n (y_i - \\upsilon_i)^2}{\\sum_{=1}^n (y_i - \\bar{y})^2}} $$ \\(y_i\\) actual values, \\(\\upsilon_i\\) predicted values, \\(\\bar{y}\\) mean actual values.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/rrse.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{root}\\) \\(\\text{relative}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrse.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Relative Root Squared Errror (RRSE) cat( \"Relative Root Squared Errror\", rrse( actual = actual, predicted = predicted, ), \"Relative Root Squared Errror (weighted)\", weighted.rrse( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Relative Root Squared Errror #> 0.3619174 #> Relative Root Squared Errror (weighted) #> 0.3691304"},{"path":"https://serkor1.github.io/SLmetrics/reference/rsq.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(R^2\\) — rsq.numeric","title":"Compute the \\(R^2\\) — rsq.numeric","text":"rsq()-function calculates \\(R^2\\), coefficient determination, ovserved predicted vectors. default rsq() returns unadjusted \\(R^2\\). adjusted \\(R^2\\) set \\(k = \\kappa - 1\\), \\(\\kappa\\) number parameters.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rsq.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(R^2\\) — rsq.numeric","text":"","code":"# S3 method for class 'numeric' rsq(actual, predicted, k = 0, ...) # S3 method for class 'numeric' weighted.rsq(actual, predicted, w, k = 0, ...) rsq(...) weighted.rsq(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/rsq.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(R^2\\) — rsq.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. k -vector length 1 (default: 0). \\(k>0\\) function returns adjusted \\(R^2\\). ... Arguments passed methods. w -vector length \\(n\\). 
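The RRSE formula in the rrse() entry above compares the model's squared errors to those of the mean benchmark; a minimal base-R sketch on the mtcars example, assuming SLmetrics is installed.

actual    <- mtcars$mpg
predicted <- fitted(lm(mpg ~ ., data = mtcars))

# sqrt(SSE / SST), as in the Calculation section
manual_rrse <- sqrt(sum((actual - predicted)^2) / sum((actual - mean(actual))^2))

# should agree with the package implementation (0.3619174 in the example output)
all.equal(manual_rrse, SLmetrics::rrse(actual, predicted))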
weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rsq.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(R^2\\) — rsq.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rsq.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(R^2\\) — rsq.numeric","text":"metric calculated follows, $$ R^2 = 1 - \\frac{\\text{SSE}}{\\text{SST}} \\frac{n-1}{n - (k + 1)} $$ \\(\\text{SSE}\\) sum squared errors, \\(\\text{SST}\\) total sum squared errors, \\(n\\) number observations, \\(k\\) number non-constant parameters.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/rsq.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(R^2\\) — rsq.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure in-sample performance actual <- mtcars$mpg predicted <- fitted(model) # 2) calculate performance # using R squared adjusted and # unadjused for features cat( \"Rsq\", rsq( actual = actual, predicted = fitted(model) ), \"Rsq (Adjusted)\", rsq( actual = actual, predicted = fitted(model), k = ncol(model.matrix(model)) - 1 ), sep = \"\\n\" ) #> Rsq #> 0.8690158 #> Rsq (Adjusted) #> 0.8066423"},{"path":"https://serkor1.github.io/SLmetrics/reference/setNumberThreads.html","id":null,"dir":"Reference","previous_headings":"","what":"Set the Number of Threads for Parallel Computations — setNumberThreads","title":"Set the Number of Threads for Parallel Computations — setNumberThreads","text":"function sets number threads used parallel computations. set -1, available threads utilized.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/setNumberThreads.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Set the Number of Threads for Parallel Computations — setNumberThreads","text":"","code":"setNumberThreads(value = -1L)"},{"path":"https://serkor1.github.io/SLmetrics/reference/setNumberThreads.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Set the Number of Threads for Parallel Computations — setNumberThreads","text":"value specifying number threads use (Default: -1). 
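The unadjusted and adjusted R-squared from the rsq() entry above can be reproduced by hand; a minimal sketch on the mtcars example, where k = ncol(model.matrix(model)) - 1 gives the 10 non-constant parameters of mpg ~ .

model     <- lm(mpg ~ ., data = mtcars)
actual    <- mtcars$mpg
predicted <- fitted(model)

n   <- length(actual)
k   <- ncol(model.matrix(model)) - 1   # 10 non-constant parameters
sse <- sum((actual - predicted)^2)
sst <- sum((actual - mean(actual))^2)

1 - sse / sst                               # unadjusted R^2 (0.8690158 in the example)
1 - (sse / sst) * (n - 1) / (n - (k + 1))   # adjusted R^2   (0.8066423 in the example)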
Default -1, uses available threads.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/setNumberThreads.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Set the Number of Threads for Parallel Computations — setNumberThreads","text":"","code":"if (FALSE) { # \\dontrun{ setNumberThreads(4) } # }"},{"path":"https://serkor1.github.io/SLmetrics/reference/setUseOpenMP.html","id":null,"dir":"Reference","previous_headings":"","what":"Enable or Disable OpenMP Parallelization — setUseOpenMP","title":"Enable or Disable OpenMP Parallelization — setUseOpenMP","text":"function allows enable disable use OpenMP parallelizing computations.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/setUseOpenMP.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Enable or Disable OpenMP Parallelization — setUseOpenMP","text":"","code":"setUseOpenMP(value = FALSE)"},{"path":"https://serkor1.github.io/SLmetrics/reference/setUseOpenMP.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Enable or Disable OpenMP Parallelization — setUseOpenMP","text":"value value length 1 (Default: FALSE). length, OpenMP used parallelize computations.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/setUseOpenMP.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Enable or Disable OpenMP Parallelization — setUseOpenMP","text":"","code":"if (FALSE) { # \\dontrun{ setUseOpenMP(TRUE) } # }"},{"path":"https://serkor1.github.io/SLmetrics/reference/smape.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{symmetric}\\) \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — smape.numeric","title":"Compute the \\(\\text{symmetric}\\) \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — smape.numeric","text":"smape()-function computes symmetric mean absolute percentage error observed predicted vectors. weighted.smape() function computes weighted symmetric mean absolute percentage error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/smape.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{symmetric}\\) \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — smape.numeric","text":"","code":"# S3 method for class 'numeric' smape(actual, predicted, ...) # S3 method for class 'numeric' weighted.smape(actual, predicted, w, ...) smape(...) weighted.smape(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/smape.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{symmetric}\\) \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — smape.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. w -vector length \\(n\\). 
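For smape(), the definition given in the Calculation section of its entry can be checked the same way; a minimal sketch on the mtcars example, assuming SLmetrics is installed.

actual    <- mtcars$mpg
predicted <- fitted(lm(mpg ~ ., data = mtcars))

# mean of |y - v| / ((|y| + |v|) / 2), as in the Calculation section
manual_smape <- mean(abs(actual - predicted) / ((abs(actual) + abs(predicted)) / 2))

# compare with the package implementation
all.equal(manual_smape, SLmetrics::smape(actual, predicted))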
weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/smape.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{symmetric}\\) \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — smape.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/smape.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{symmetric}\\) \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — smape.numeric","text":"metric calculated follows, $$ \\sum_i^n \\frac{1}{n} \\frac{|y_i - \\upsilon_i|}{\\frac{|y_i|+|\\upsilon_i|}{2}} $$ \\(y_i\\) \\(\\upsilon_i\\) actual predicted values respectively.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/smape.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{symmetric}\\) \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — smape.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Symmetric Mean Absolute Percentage Error (MAPE) cat( \"Symmetric Mean Absolute Percentage Error\", mape( actual = actual, predicted = predicted, ), \"Symmetric Mean Absolute Percentage Error (weighted)\", weighted.mape( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Symmetric Mean Absolute Percentage Error #> 0.08776196 #> Symmetric Mean Absolute Percentage Error (weighted) #> 0.08574846"},{"path":"https://serkor1.github.io/SLmetrics/reference/specificity.html","id":null,"dir":"Reference","previous_headings":"","what":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","title":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","text":" specificity()-function computes specificity, also known True Negative Rate (TNR) selectivity, two vectors predicted observed factor() values. weighted.specificity() function computes weighted specificity.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/specificity.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","text":"","code":"# S3 method for class 'factor' specificity(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.specificity(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' specificity(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' tnr(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.tnr(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' tnr(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' selectivity(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.selectivity(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' selectivity(x, micro = NULL, na.rm = TRUE, ...) specificity(...) tnr(...) 
selectivity(...) weighted.specificity(...) weighted.tnr(...) weighted.selectivity(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/specificity.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/specificity.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/specificity.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
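Class-wise specificity, #TN_k / (#TN_k + #FP_k) as given in the Calculation section of the specificity() entry, can also be derived from a plain contingency table; a minimal sketch reusing the A/B/C factors from that entry, assuming SLmetrics is installed.

# recreate the A/B/C example factors from this entry
set.seed(1903)
actual    <- factor(sample(1:3, 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
set.seed(1903)
predicted <- factor(sample(c(1, 3), 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))

cm <- table(actual, predicted)   # rows: actual, columns: predicted
tp <- diag(cm)
fp <- colSums(cm) - tp
fn <- rowSums(cm) - tp
tn <- sum(cm) - tp - fp - fn

tn / (tn + fp)                   # class-wise specificity

# compare with the package implementation
SLmetrics::specificity(actual, predicted)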
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":"https://serkor1.github.io/SLmetrics/reference/specificity.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#TN_k}{\\#TN_k+\\#FP_k} $$ \\(\\#TN_k\\) \\(\\#FP_k\\) number true negatives false positives, respectively, class \\(k\\).","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/specificity.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using Specificity # 4.1) unweighted Specificity specificity( actual = actual, predicted = predicted ) #> Virginica Others #> 0.86 0.70 # 4.2) weighted Specificity weighted.specificity( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.7738553 0.7229827 # 5) evaluate overall performance # using micro-averaged Specificity cat( \"Micro-averaged Specificity\", specificity( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged Specificity (weighted)\", weighted.specificity( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged Specificity #> 0.8066667 #> Micro-averaged Specificity (weighted) #> 0.7488026"},{"path":"https://serkor1.github.io/SLmetrics/reference/zerooneloss.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","title":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","text":"zerooneloss()-function computes zero-one Loss, classification loss function calculates proportion misclassified instances two vectors predicted observed factor() values. weighted.zerooneloss() function computes weighted zero-one loss.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/zerooneloss.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","text":"","code":"# S3 method for class 'factor' zerooneloss(actual, predicted, ...) 
# S3 method for class 'factor' weighted.zerooneloss(actual, predicted, w, ...) # S3 method for class 'cmatrix' zerooneloss(x, ...) zerooneloss(...) weighted.zerooneloss(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/zerooneloss.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","text":"actual vector - length \\(n\\), \\(k\\) levels predicted vector -vector length \\(n\\), \\(k\\) levels ... Arguments passed methods w -vector length \\(n\\). NULL default x confusion matrix created cmatrix()","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/zerooneloss.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","text":"-vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/zerooneloss.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","text":"metric calculated follows, $$ \\frac{\\#FP + \\#FN}{\\#TP + \\#TN + \\#FP + \\#FN} $$ \\(\\#TP\\), \\(\\#TN\\), \\(\\#FP\\), \\(\\#FN\\) represent true positives, true negatives, false positives, false negatives, respectively.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/zerooneloss.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
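Since the zero-one loss is the proportion of misclassified observations, it can be checked in one line; a minimal sketch reusing the A/B/C factors from the zerooneloss() entry, assuming SLmetrics is installed.

# recreate the A/B/C example factors from this entry
set.seed(1903)
actual    <- factor(sample(1:3, 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))
set.seed(1903)
predicted <- factor(sample(c(1, 3), 10, replace = TRUE), levels = 1:3, labels = c("A", "B", "C"))

# proportion of misclassified observations, i.e. 1 - accuracy
mean(actual != predicted)

# compare with the package implementation
SLmetrics::zerooneloss(actual, predicted)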
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/zerooneloss.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate model # performance using Zero-One Loss cat( \"Zero-One Loss\", zerooneloss( actual = actual, predicted = predicted ), \"Zero-One Loss (weigthed)\", weighted.zerooneloss( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ), sep = \"\\n\" ) #> Zero-One Loss #> 0.1933333 #> Zero-One Loss (weigthed) #> 0.2511974"},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"version-03-1","dir":"Changelog","previous_headings":"","what":"Version 0.3-1","title":"Version 0.3-1","text":"Version 0.3-1 considered pre-release {SLmetrics}. expect breaking changes, unless major bug/issue reported nature forces breaking changes.","code":""},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"rocket-improvements-0-3-1","dir":"Changelog","previous_headings":"","what":"🚀 Improvements","title":"Version 0.3-1","text":"OpenMP Support (PR https://github.com/serkor1/SLmetrics/pull/40): {SLmetrics} now supports parallelization OpenMP. 
OpenMP can utilized follows:","code":"# 1) probability distribution # generator rand.sum <- function(n){ x <- sort(runif(n-1)) c(x,1) - c(0,x) } # 2) generate probability # matrix set.seed(1903) pk <- t(replicate(100,rand.sum(1e3))) # 3) Enable OpenMP SLmetrics::setUseOpenMP(TRUE) #> OpenMP usage set to: enabled system.time(SLmetrics::entropy(pk)) #> user system elapsed #> 0.001 0.001 0.001 # 3) Disable OpenMP SLmetrics::setUseOpenMP(FALSE) #> OpenMP usage set to: disabled system.time(SLmetrics::entropy(pk)) #> user system elapsed #> 0.002 0.000 0.002"},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"bug-bug-fixes-0-3-1","dir":"Changelog","previous_headings":"","what":"🐛 Bug-fixes","title":"Version 0.3-1","text":"Plot-method ROC prROC (https://github.com/serkor1/SLmetrics/issues/36): Fixed bug plot.ROC() plot.prROC() panels = FALSE additional lines added plot.","code":""},{"path":[]},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"new-feature-0-3-0","dir":"Changelog","previous_headings":"","what":"New Feature","title":"Version 0.3-0","text":"Relative Root Mean Squared Error: function normalizes Root Mean Squared Error facttor. official way normalizing - {SLmetrics} RMSE can normalized using three options; mean-, range- IQR-normalization. can used follows, Log Loss: Weighted unweighted Log Loss, without normalization. function can used follows, Weighted Receiver Operator Characteristics: weighted.ROC(), function calculates weighted True Positive False Positive Rates threshold. Weighted Precision-Recall Curve: weighted.prROC(), function calculates weighted Recall Precsion threshold.","code":"# 1) define values actual <- rnorm(1e3) predicted <- actual + rnorm(1e3) # 2) calculate Relative Root Mean Squared Error cat( \"Mean Relative Root Mean Squared Error\", SLmetrics::rrmse( actual = actual, predicted = predicted, normalization = 0 ), \"Range Relative Root Mean Squared Error\", SLmetrics::rrmse( actual = actual, predicted = predicted, normalization = 1 ), \"IQR Relative Root Mean Squared Error\", SLmetrics::rrmse( actual = actual, predicted = predicted, normalization = 2 ), sep = \"\\n\" ) #> Mean Relative Root Mean Squared Error #> 40.74819 #> Range Relative Root Mean Squared Error #> 0.1556036 #> IQR Relative Root Mean Squared Error #> 0.738214 # Create factors and response probabilities actual <- factor(c(\"Class A\", \"Class B\", \"Class A\")) weights <- c(0.3,0.9,1) response <- matrix(cbind( 0.2, 0.8, 0.8, 0.2, 0.7, 0.3 ),nrow = 3, ncol = 2) cat( \"Unweighted Log Loss:\", SLmetrics::logloss( actual, response ), \"Weighted log Loss:\", SLmetrics::weighted.logloss( actual = actual, response = response, w = weights ), sep = \"\\n\" ) #> Unweighted Log Loss: #> 0.7297521 #> Weighted log Loss: #> 0.4668102"},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"breaking-changes-0-3-0","dir":"Changelog","previous_headings":"","what":"Breaking Changes","title":"Version 0.3-0","text":"Weighted Confusion Matix: w-argument cmatrix() removed favor verbose weighted confusion matrix call weighted.cmatrix()-function. See , Prior version 0.3-0 weighted confusion matrix part cmatrix()-function called follows, solution, although simple, inconsistent remaining implementation weighted metrics {SLmetrics}. 
regain consistency simplicity weighted confusion matrix now retrieved follows,","code":"SLmetrics::cmatrix( actual = actual, predicted = predicted, w = weights ) # 1) define factors actual <- factor(sample(letters[1:3], 100, replace = TRUE)) predicted <- factor(sample(letters[1:3], 100, replace = TRUE)) weights <- runif(length(actual)) # 2) without weights SLmetrics::cmatrix( actual = actual, predicted = predicted ) #> a b c #> a 12 10 15 #> b 10 15 8 #> c 5 14 11 # 2) with weights SLmetrics::weighted.cmatrix( actual = actual, predicted = predicted, w = weights ) #> a b c #> a 3.846279 5.399945 7.226539 #> b 4.988230 7.617554 4.784221 #> c 2.959719 5.045980 4.725642"},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"bug-bug-fixes-0-3-0","dir":"Changelog","previous_headings":"","what":"🐛 Bug-fixes","title":"Version 0.3-0","text":"Return named vectors: classification metrics micro == NULL returning named vectors. fixed.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"improvements-0-2-0","dir":"Changelog","previous_headings":"","what":"Improvements","title":"Version 0.2-0","text":"documentation: documentation gotten extra love, now functions formulas embedded, details section freed general description [factor] creation. make room future expansions various functions details required. weighted classification metrics: cmatrix()-function now accepts argument w sample weights; passed respective method return weighted metric. example using sample weights confusion matrix, Calculating weighted metrics manually using foo.cmatrix()-method, Please note, however, possible pass cmatix()-weighted.accurracy(), Unit-testing: functions now tested edge-cases balanced imbalanced classifcation problems, regression problems, individually. enable robust development process prevent avoidable bugs.","code":"# 1) define factors actual <- factor(sample(letters[1:3], 100, replace = TRUE)) predicted <- factor(sample(letters[1:3], 100, replace = TRUE)) weights <- runif(length(actual)) # 2) without weights SLmetrics::cmatrix( actual = actual, predicted = predicted ) #> a b c #> a 14 9 14 #> b 12 15 10 #> c 6 9 11 # 2) with weights SLmetrics::weighted.cmatrix( actual = actual, predicted = predicted, w = weights ) #> a b c #> a 6.197341 4.717194 6.122321 #> b 6.244226 7.511618 5.114025 #> c 2.417569 5.487810 5.760531 # 1) weigthed confusion matrix # and weighted accuray confusion_matrix <- SLmetrics::cmatrix( actual = actual, predicted = predicted, w = weights ) # 2) pass into accuracy # function SLmetrics::accuracy( confusion_matrix ) #> [1] 0.4 # 3) calculate the weighted # accuracy manually SLmetrics::weighted.accuracy( actual = actual, predicted = predicted, w = weights ) #> [1] 0.3927467 try( SLmetrics::weighted.accuracy( confusion_matrix ) ) #> Error in UseMethod(generic = \"weighted.accuracy\", object = ..1) : #> no applicable method for 'weighted.accuracy' applied to an object of class \"cmatrix\""},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"bug-bug-fixes-0-2-0","dir":"Changelog","previous_headings":"","what":"🐛 Bug-fixes","title":"Version 0.2-0","text":"Floating precision: Metrics give different results based method used. means foo.cmatrix() foo.factor() produce different results (See Issue https://github.com/serkor1/SLmetrics/issues/16). fixed using higher precision Rcpp::NumericMatrix instead Rcpp::IntegerMatrix. Miscalculation Confusion Matrix elements: error FN, TN, FP TP calculated fixed. issue raised bug. 
something caught unit-tests, total samples high spot error. , however, fixed now. means metrics uses explicitly now stable, produces desired output. Calculation Error Fowlkes-Mallows Index: bug calculation fmi()-function fixed. fmi()-function now correctly calculates measure. Calculation Error Pinball Deviance Concordance Correlation Coefficient: See issue https://github.com/serkor1/SLmetrics/issues/19. Switched unbiased variance calculation ccc()-function. pinball()-function missing weighted quantile function. issue now fixed. Calculation Error Balanced Accuracy: See issue https://github.com/serkor1/SLmetrics/issues/24. function now correctly adjusts random chance, result matches {scikit-learn}. Calculation Error F-beta Score: See issue https://github.com/serkor1/SLmetrics/issues/23. function weren't respecting na.rm micro, fixed accordingly. Calculation Error Relative Absolute Error: function incorrectly calculating means, instead sums. fixed.","code":""},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"breaking-changes-0-2-0","dir":"Changelog","previous_headings":"","what":"Breaking changes","title":"Version 0.2-0","text":"regression metrics na.rm- w-arguments removed. weighted regression metrics separate function weighted.foo() increase consistency across metrics. See example , rrmse()-function removed favor rrse()-function. function incorrectly specified described package.","code":"# 1) define regression problem actual <- rnorm(n = 1e3) predicted <- actual + rnorm(n = 1e3) w <- runif(n = 1e3) # 2) unweighted metrics SLmetrics::rmse(actual, predicted) #> [1] 0.9989386 # 3) weighted metrics SLmetrics::weighted.rmse(actual, predicted, w = w) #> [1] 1.013139"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"general-0-1-1","dir":"Changelog","previous_headings":"","what":"General","title":"Version 0.1-1","text":"Backend changes: pair-wise metrics are moved {Rcpp} C++, reduced execution time half. pair-wise metrics now faster.","code":""},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"improvements-0-1-1","dir":"Changelog","previous_headings":"","what":"Improvements","title":"Version 0.1-1","text":"NA-controls: pair-wise metrics doesn't micro-argument handling missing values according C++ {Rcpp} internals. See Issue. Thank @EmilHvitfeldt pointing . now fixed functions uses na.rm-argument explicitly control . See ,","code":"# 1) define factors actual <- factor(c(\"no\", \"yes\")) predicted <- factor(c(NA, \"no\")) # 2) accuracy with na.rm = TRUE SLmetrics::accuracy( actual = actual, predicted = predicted, na.rm = TRUE ) # 2) accuracy with na.rm = FALSE SLmetrics::accuracy( actual = actual, predicted = predicted, na.rm = FALSE )"},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"bug-bug-fixes-0-1-1","dir":"Changelog","previous_headings":"","what":"🐛 Bug-fixes","title":"Version 0.1-1","text":"plot.prROC()- plot.ROC()-functions now adds line plot panels = FALSE.
See Issue https://github.com/serkor1/SLmetrics/issues/9.","code":"# 1) define actual # classes actual <- factor( sample(letters[1:2], size = 100, replace = TRUE) ) # 2) define response # probabilities response <- runif(100) # 3) calculate # ROC and prROC # 3.1) ROC roc <- SLmetrics::ROC( actual, response ) # 3.2) prROC prroc <- SLmetrics::prROC( actual, response ) # 4) plot with panels # FALSE par(mfrow = c(1,2)) plot( roc, panels = FALSE ) plot( prroc, panels = FALSE )"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"general-0-1-0","dir":"Changelog","previous_headings":"","what":"General","title":"Version 0.1-0","text":"{SLmetrics} collection Machine Learning performance evaluation functions supervised learning. Visit online documentation GitHub Pages.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"supervised-classification-metrics-0-1-0","dir":"Changelog","previous_headings":"Examples","what":"Supervised classification metrics","title":"Version 0.1-0","text":"","code":"# 1) actual classes print( actual <- factor( sample(letters[1:3], size = 10, replace = TRUE) ) ) #> [1] a b a c b a a a c b #> Levels: a b c # 2) predicted classes print( predicted <- factor( sample(letters[1:3], size = 10, replace = TRUE) ) ) #> [1] b a c c c c c c a a #> Levels: a b c # 1) calculate confusion # matrix and summarise # it summary( confusion_matrix <- SLmetrics::cmatrix( actual = actual, predicted = predicted ) ) #> Confusion Matrix (3 x 3) #> ================================================================================ #> a b c #> a 0 1 4 #> b 2 0 1 #> c 1 0 1 #> ================================================================================ #> Overall Statistics (micro average) #> - Accuracy: 0.10 #> - Balanced Accuracy: 0.17 #> - Sensitivity: 0.10 #> - Specificity: 0.55 #> - Precision: 0.10 # 2) calculate false positive # rate using micro average SLmetrics::fpr( confusion_matrix ) #> a b c #> 0.6000000 0.1428571 0.6250000"},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"supervised-regression-metrics-0-1-0","dir":"Changelog","previous_headings":"Examples","what":"Supervised regression metrics","title":"Version 0.1-0","text":"","code":"# 1) actual values actual <- rnorm(n = 100) # 2) predicted values predicted <- actual + rnorm(n = 100) # 1) calculate # huber loss SLmetrics::huberloss( actual = actual, predicted = predicted ) #> [1] 0.4389594"}] +[{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"our-pledge","dir":"","previous_headings":"","what":"Our Pledge","title":"Contributor Covenant Code of Conduct","text":"members, contributors, leaders pledge make participation community harassment-free experience everyone, regardless age, body size, visible invisible disability, ethnicity, sex characteristics, gender identity expression, level experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, sexual identity orientation. 
pledge act interact ways contribute open, welcoming, diverse, inclusive, healthy community.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"our-standards","dir":"","previous_headings":"","what":"Our Standards","title":"Contributor Covenant Code of Conduct","text":"Examples behavior contributes positive environment community include: Demonstrating empathy kindness toward people respectful differing opinions, viewpoints, experiences Giving gracefully accepting constructive feedback Accepting responsibility apologizing affected mistakes, learning experience Focusing best just us individuals, overall community Examples unacceptable behavior include: use sexualized language imagery, sexual attention advances kind Trolling, insulting derogatory comments, personal political attacks Public private harassment Publishing others’ private information, physical email address, without explicit permission conduct reasonably considered inappropriate professional setting","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"enforcement-responsibilities","dir":"","previous_headings":"","what":"Enforcement Responsibilities","title":"Contributor Covenant Code of Conduct","text":"Community leaders responsible clarifying enforcing standards acceptable behavior take appropriate fair corrective action response behavior deem inappropriate, threatening, offensive, harmful. Community leaders right responsibility remove, edit, reject comments, commits, code, wiki edits, issues, contributions aligned Code Conduct, communicate reasons moderation decisions appropriate.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"scope","dir":"","previous_headings":"","what":"Scope","title":"Contributor Covenant Code of Conduct","text":"Code Conduct applies within community spaces, also applies individual officially representing community public spaces. Examples representing community include using official e-mail address, posting via official social media account, acting appointed representative online offline event.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"enforcement","dir":"","previous_headings":"","what":"Enforcement","title":"Contributor Covenant Code of Conduct","text":"Instances abusive, harassing, otherwise unacceptable behavior may reported community leaders responsible enforcement serkor1@duck.com. complaints reviewed investigated promptly fairly. community leaders obligated respect privacy security reporter incident.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"enforcement-guidelines","dir":"","previous_headings":"","what":"Enforcement Guidelines","title":"Contributor Covenant Code of Conduct","text":"Community leaders follow Community Impact Guidelines determining consequences action deem violation Code Conduct:","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"id_1-correction","dir":"","previous_headings":"Enforcement Guidelines","what":"1. Correction","title":"Contributor Covenant Code of Conduct","text":"Community Impact: Use inappropriate language behavior deemed unprofessional unwelcome community. Consequence: private, written warning community leaders, providing clarity around nature violation explanation behavior inappropriate. 
public apology may requested.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"id_2-warning","dir":"","previous_headings":"Enforcement Guidelines","what":"2. Warning","title":"Contributor Covenant Code of Conduct","text":"Community Impact: violation single incident series actions. Consequence: warning consequences continued behavior. interaction people involved, including unsolicited interaction enforcing Code Conduct, specified period time. includes avoiding interactions community spaces well external channels like social media. Violating terms may lead temporary permanent ban.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"id_3-temporary-ban","dir":"","previous_headings":"Enforcement Guidelines","what":"3. Temporary Ban","title":"Contributor Covenant Code of Conduct","text":"Community Impact: serious violation community standards, including sustained inappropriate behavior. Consequence: temporary ban sort interaction public communication community specified period time. public private interaction people involved, including unsolicited interaction enforcing Code Conduct, allowed period. Violating terms may lead permanent ban.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"id_4-permanent-ban","dir":"","previous_headings":"Enforcement Guidelines","what":"4. Permanent Ban","title":"Contributor Covenant Code of Conduct","text":"Community Impact: Demonstrating pattern violation community standards, including sustained inappropriate behavior, harassment individual, aggression toward disparagement classes individuals. Consequence: permanent ban sort public interaction within community.","code":""},{"path":"https://serkor1.github.io/SLmetrics/CODE_OF_CONDUCT.html","id":"attribution","dir":"","previous_headings":"","what":"Attribution","title":"Contributor Covenant Code of Conduct","text":"Code Conduct adapted Contributor Covenant, version 2.1, available https://www.contributor-covenant.org/version/2/1/code_of_conduct.html. Community Impact Guidelines inspired [Mozilla’s code conduct enforcement ladder][https://github.com/mozilla/inclusion]. answers common questions code conduct, see FAQ https://www.contributor-covenant.org/faq. Translations available https://www.contributor-covenant.org/translations.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":null,"dir":"","previous_headings":"","what":"GNU General Public License","title":"GNU General Public License","text":"Version 3, 29 June 2007Copyright © 2007 Free Software Foundation, Inc.  Everyone permitted copy distribute verbatim copies license document, changing allowed.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"preamble","dir":"","previous_headings":"","what":"Preamble","title":"GNU General Public License","text":"GNU General Public License free, copyleft license software kinds works. licenses software practical works designed take away freedom share change works. contrast, GNU General Public License intended guarantee freedom share change versions program–make sure remains free software users. , Free Software Foundation, use GNU General Public License software; applies also work released way authors. can apply programs, . speak free software, referring freedom, price. General Public Licenses designed make sure freedom distribute copies free software (charge wish), receive source code can get want , can change software use pieces new free programs, know can things. 
protect rights, need prevent others denying rights asking surrender rights. Therefore, certain responsibilities distribute copies software, modify : responsibilities respect freedom others. example, distribute copies program, whether gratis fee, must pass recipients freedoms received. must make sure , , receive can get source code. must show terms know rights. Developers use GNU GPL protect rights two steps: (1) assert copyright software, (2) offer License giving legal permission copy, distribute /modify . developers’ authors’ protection, GPL clearly explains warranty free software. users’ authors’ sake, GPL requires modified versions marked changed, problems attributed erroneously authors previous versions. devices designed deny users access install run modified versions software inside , although manufacturer can . fundamentally incompatible aim protecting users’ freedom change software. systematic pattern abuse occurs area products individuals use, precisely unacceptable. Therefore, designed version GPL prohibit practice products. problems arise substantially domains, stand ready extend provision domains future versions GPL, needed protect freedom users. Finally, every program threatened constantly software patents. States allow patents restrict development use software general-purpose computers, , wish avoid special danger patents applied free program make effectively proprietary. prevent , GPL assures patents used render program non-free. precise terms conditions copying, distribution modification follow.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_0-definitions","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"0. Definitions","title":"GNU General Public License","text":"“License” refers version 3 GNU General Public License. “Copyright” also means copyright-like laws apply kinds works, semiconductor masks. “Program” refers copyrightable work licensed License. licensee addressed “”. “Licensees” “recipients” may individuals organizations. “modify” work means copy adapt part work fashion requiring copyright permission, making exact copy. resulting work called “modified version” earlier work work “based ” earlier work. “covered work” means either unmodified Program work based Program. “propagate” work means anything , without permission, make directly secondarily liable infringement applicable copyright law, except executing computer modifying private copy. Propagation includes copying, distribution (without modification), making available public, countries activities well. “convey” work means kind propagation enables parties make receive copies. Mere interaction user computer network, transfer copy, conveying. interactive user interface displays “Appropriate Legal Notices” extent includes convenient prominently visible feature (1) displays appropriate copyright notice, (2) tells user warranty work (except extent warranties provided), licensees may convey work License, view copy License. interface presents list user commands options, menu, prominent item list meets criterion.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_1-source-code","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"1. Source Code","title":"GNU General Public License","text":"“source code” work means preferred form work making modifications . “Object code” means non-source form work. 
“Standard Interface” means interface either official standard defined recognized standards body, , case interfaces specified particular programming language, one widely used among developers working language. “System Libraries” executable work include anything, work whole, () included normal form packaging Major Component, part Major Component, (b) serves enable use work Major Component, implement Standard Interface implementation available public source code form. “Major Component”, context, means major essential component (kernel, window system, ) specific operating system () executable work runs, compiler used produce work, object code interpreter used run . “Corresponding Source” work object code form means source code needed generate, install, (executable work) run object code modify work, including scripts control activities. However, include work’s System Libraries, general-purpose tools generally available free programs used unmodified performing activities part work. example, Corresponding Source includes interface definition files associated source files work, source code shared libraries dynamically linked subprograms work specifically designed require, intimate data communication control flow subprograms parts work. Corresponding Source need include anything users can regenerate automatically parts Corresponding Source. Corresponding Source work source code form work.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_2-basic-permissions","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"2. Basic Permissions","title":"GNU General Public License","text":"rights granted License granted term copyright Program, irrevocable provided stated conditions met. License explicitly affirms unlimited permission run unmodified Program. output running covered work covered License output, given content, constitutes covered work. License acknowledges rights fair use equivalent, provided copyright law. may make, run propagate covered works convey, without conditions long license otherwise remains force. may convey covered works others sole purpose make modifications exclusively , provide facilities running works, provided comply terms License conveying material control copyright. thus making running covered works must exclusively behalf, direction control, terms prohibit making copies copyrighted material outside relationship . Conveying circumstances permitted solely conditions stated . Sublicensing allowed; section 10 makes unnecessary.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_3-protecting-users-legal-rights-from-anti-circumvention-law","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"3. Protecting Users’ Legal Rights From Anti-Circumvention Law","title":"GNU General Public License","text":"covered work shall deemed part effective technological measure applicable law fulfilling obligations article 11 WIPO copyright treaty adopted 20 December 1996, similar laws prohibiting restricting circumvention measures. convey covered work, waive legal power forbid circumvention technological measures extent circumvention effected exercising rights License respect covered work, disclaim intention limit operation modification work means enforcing, work’s users, third parties’ legal rights forbid circumvention technological measures.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_4-conveying-verbatim-copies","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"4. 
Conveying Verbatim Copies","title":"GNU General Public License","text":"may convey verbatim copies Program’s source code receive , medium, provided conspicuously appropriately publish copy appropriate copyright notice; keep intact notices stating License non-permissive terms added accord section 7 apply code; keep intact notices absence warranty; give recipients copy License along Program. may charge price price copy convey, may offer support warranty protection fee.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_5-conveying-modified-source-versions","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"5. Conveying Modified Source Versions","title":"GNU General Public License","text":"may convey work based Program, modifications produce Program, form source code terms section 4, provided also meet conditions: ) work must carry prominent notices stating modified , giving relevant date. b) work must carry prominent notices stating released License conditions added section 7. requirement modifies requirement section 4 “keep intact notices”. c) must license entire work, whole, License anyone comes possession copy. License therefore apply, along applicable section 7 additional terms, whole work, parts, regardless packaged. License gives permission license work way, invalidate permission separately received . d) work interactive user interfaces, must display Appropriate Legal Notices; however, Program interactive interfaces display Appropriate Legal Notices, work need make . compilation covered work separate independent works, nature extensions covered work, combined form larger program, volume storage distribution medium, called “aggregate” compilation resulting copyright used limit access legal rights compilation’s users beyond individual works permit. Inclusion covered work aggregate cause License apply parts aggregate.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_6-conveying-non-source-forms","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"6. Conveying Non-Source Forms","title":"GNU General Public License","text":"may convey covered work object code form terms sections 4 5, provided also convey machine-readable Corresponding Source terms License, one ways: ) Convey object code , embodied , physical product (including physical distribution medium), accompanied Corresponding Source fixed durable physical medium customarily used software interchange. b) Convey object code , embodied , physical product (including physical distribution medium), accompanied written offer, valid least three years valid long offer spare parts customer support product model, give anyone possesses object code either (1) copy Corresponding Source software product covered License, durable physical medium customarily used software interchange, price reasonable cost physically performing conveying source, (2) access copy Corresponding Source network server charge. c) Convey individual copies object code copy written offer provide Corresponding Source. alternative allowed occasionally noncommercially, received object code offer, accord subsection 6b. d) Convey object code offering access designated place (gratis charge), offer equivalent access Corresponding Source way place charge. need require recipients copy Corresponding Source along object code. 
place copy object code network server, Corresponding Source may different server (operated third party) supports equivalent copying facilities, provided maintain clear directions next object code saying find Corresponding Source. Regardless server hosts Corresponding Source, remain obligated ensure available long needed satisfy requirements. e) Convey object code using peer--peer transmission, provided inform peers object code Corresponding Source work offered general public charge subsection 6d. separable portion object code, whose source code excluded Corresponding Source System Library, need included conveying object code work. “User Product” either (1) “consumer product”, means tangible personal property normally used personal, family, household purposes, (2) anything designed sold incorporation dwelling. determining whether product consumer product, doubtful cases shall resolved favor coverage. particular product received particular user, “normally used” refers typical common use class product, regardless status particular user way particular user actually uses, expects expected use, product. product consumer product regardless whether product substantial commercial, industrial non-consumer uses, unless uses represent significant mode use product. “Installation Information” User Product means methods, procedures, authorization keys, information required install execute modified versions covered work User Product modified version Corresponding Source. information must suffice ensure continued functioning modified object code case prevented interfered solely modification made. convey object code work section , , specifically use , User Product, conveying occurs part transaction right possession use User Product transferred recipient perpetuity fixed term (regardless transaction characterized), Corresponding Source conveyed section must accompanied Installation Information. requirement apply neither third party retains ability install modified object code User Product (example, work installed ROM). requirement provide Installation Information include requirement continue provide support service, warranty, updates work modified installed recipient, User Product modified installed. Access network may denied modification materially adversely affects operation network violates rules protocols communication across network. Corresponding Source conveyed, Installation Information provided, accord section must format publicly documented (implementation available public source code form), must require special password key unpacking, reading copying.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_7-additional-terms","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"7. Additional Terms","title":"GNU General Public License","text":"“Additional permissions” terms supplement terms License making exceptions one conditions. Additional permissions applicable entire Program shall treated though included License, extent valid applicable law. additional permissions apply part Program, part may used separately permissions, entire Program remains governed License without regard additional permissions. convey copy covered work, may option remove additional permissions copy, part . (Additional permissions may written require removal certain cases modify work.) may place additional permissions material, added covered work, can give appropriate copyright permission. 
Notwithstanding provision License, material add covered work, may (authorized copyright holders material) supplement terms License terms: ) Disclaiming warranty limiting liability differently terms sections 15 16 License; b) Requiring preservation specified reasonable legal notices author attributions material Appropriate Legal Notices displayed works containing ; c) Prohibiting misrepresentation origin material, requiring modified versions material marked reasonable ways different original version; d) Limiting use publicity purposes names licensors authors material; e) Declining grant rights trademark law use trade names, trademarks, service marks; f) Requiring indemnification licensors authors material anyone conveys material (modified versions ) contractual assumptions liability recipient, liability contractual assumptions directly impose licensors authors. non-permissive additional terms considered “restrictions” within meaning section 10. Program received , part , contains notice stating governed License along term restriction, may remove term. license document contains restriction permits relicensing conveying License, may add covered work material governed terms license document, provided restriction survive relicensing conveying. add terms covered work accord section, must place, relevant source files, statement additional terms apply files, notice indicating find applicable terms. Additional terms, permissive non-permissive, may stated form separately written license, stated exceptions; requirements apply either way.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_8-termination","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"8. Termination","title":"GNU General Public License","text":"may propagate modify covered work except expressly provided License. attempt otherwise propagate modify void, automatically terminate rights License (including patent licenses granted third paragraph section 11). However, cease violation License, license particular copyright holder reinstated () provisionally, unless copyright holder explicitly finally terminates license, (b) permanently, copyright holder fails notify violation reasonable means prior 60 days cessation. Moreover, license particular copyright holder reinstated permanently copyright holder notifies violation reasonable means, first time received notice violation License (work) copyright holder, cure violation prior 30 days receipt notice. Termination rights section terminate licenses parties received copies rights License. rights terminated permanently reinstated, qualify receive new licenses material section 10.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_9-acceptance-not-required-for-having-copies","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"9. Acceptance Not Required for Having Copies","title":"GNU General Public License","text":"required accept License order receive run copy Program. Ancillary propagation covered work occurring solely consequence using peer--peer transmission receive copy likewise require acceptance. However, nothing License grants permission propagate modify covered work. actions infringe copyright accept License. Therefore, modifying propagating covered work, indicate acceptance License .","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_10-automatic-licensing-of-downstream-recipients","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"10. 
Automatic Licensing of Downstream Recipients","title":"GNU General Public License","text":"time convey covered work, recipient automatically receives license original licensors, run, modify propagate work, subject License. responsible enforcing compliance third parties License. “entity transaction” transaction transferring control organization, substantially assets one, subdividing organization, merging organizations. propagation covered work results entity transaction, party transaction receives copy work also receives whatever licenses work party’s predecessor interest give previous paragraph, plus right possession Corresponding Source work predecessor interest, predecessor can get reasonable efforts. may impose restrictions exercise rights granted affirmed License. example, may impose license fee, royalty, charge exercise rights granted License, may initiate litigation (including cross-claim counterclaim lawsuit) alleging patent claim infringed making, using, selling, offering sale, importing Program portion .","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_11-patents","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"11. Patents","title":"GNU General Public License","text":"“contributor” copyright holder authorizes use License Program work Program based. work thus licensed called contributor’s “contributor version”. contributor’s “essential patent claims” patent claims owned controlled contributor, whether already acquired hereafter acquired, infringed manner, permitted License, making, using, selling contributor version, include claims infringed consequence modification contributor version. purposes definition, “control” includes right grant patent sublicenses manner consistent requirements License. contributor grants non-exclusive, worldwide, royalty-free patent license contributor’s essential patent claims, make, use, sell, offer sale, import otherwise run, modify propagate contents contributor version. following three paragraphs, “patent license” express agreement commitment, however denominated, enforce patent (express permission practice patent covenant sue patent infringement). “grant” patent license party means make agreement commitment enforce patent party. convey covered work, knowingly relying patent license, Corresponding Source work available anyone copy, free charge terms License, publicly available network server readily accessible means, must either (1) cause Corresponding Source available, (2) arrange deprive benefit patent license particular work, (3) arrange, manner consistent requirements License, extend patent license downstream recipients. “Knowingly relying” means actual knowledge , patent license, conveying covered work country, recipient’s use covered work country, infringe one identifiable patents country reason believe valid. , pursuant connection single transaction arrangement, convey, propagate procuring conveyance , covered work, grant patent license parties receiving covered work authorizing use, propagate, modify convey specific copy covered work, patent license grant automatically extended recipients covered work works based . patent license “discriminatory” include within scope coverage, prohibits exercise , conditioned non-exercise one rights specifically granted License. 
may convey covered work party arrangement third party business distributing software, make payment third party based extent activity conveying work, third party grants, parties receive covered work , discriminatory patent license () connection copies covered work conveyed (copies made copies), (b) primarily connection specific products compilations contain covered work, unless entered arrangement, patent license granted, prior 28 March 2007. Nothing License shall construed excluding limiting implied license defenses infringement may otherwise available applicable patent law.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_12-no-surrender-of-others-freedom","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"12. No Surrender of Others’ Freedom","title":"GNU General Public License","text":"conditions imposed (whether court order, agreement otherwise) contradict conditions License, excuse conditions License. convey covered work satisfy simultaneously obligations License pertinent obligations, consequence may convey . example, agree terms obligate collect royalty conveying convey Program, way satisfy terms License refrain entirely conveying Program.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_13-use-with-the-gnu-affero-general-public-license","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"13. Use with the GNU Affero General Public License","title":"GNU General Public License","text":"Notwithstanding provision License, permission link combine covered work work licensed version 3 GNU Affero General Public License single combined work, convey resulting work. terms License continue apply part covered work, special requirements GNU Affero General Public License, section 13, concerning interaction network apply combination .","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_14-revised-versions-of-this-license","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"14. Revised Versions of this License","title":"GNU General Public License","text":"Free Software Foundation may publish revised /new versions GNU General Public License time time. new versions similar spirit present version, may differ detail address new problems concerns. version given distinguishing version number. Program specifies certain numbered version GNU General Public License “later version” applies , option following terms conditions either numbered version later version published Free Software Foundation. Program specify version number GNU General Public License, may choose version ever published Free Software Foundation. Program specifies proxy can decide future versions GNU General Public License can used, proxy’s public statement acceptance version permanently authorizes choose version Program. Later license versions may give additional different permissions. However, additional obligations imposed author copyright holder result choosing follow later version.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_15-disclaimer-of-warranty","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"15. Disclaimer of Warranty","title":"GNU General Public License","text":"WARRANTY PROGRAM, EXTENT PERMITTED APPLICABLE LAW. EXCEPT OTHERWISE STATED WRITING COPYRIGHT HOLDERS /PARTIES PROVIDE PROGRAM “” WITHOUT WARRANTY KIND, EITHER EXPRESSED IMPLIED, INCLUDING, LIMITED , IMPLIED WARRANTIES MERCHANTABILITY FITNESS PARTICULAR PURPOSE. ENTIRE RISK QUALITY PERFORMANCE PROGRAM . 
PROGRAM PROVE DEFECTIVE, ASSUME COST NECESSARY SERVICING, REPAIR CORRECTION.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_16-limitation-of-liability","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"16. Limitation of Liability","title":"GNU General Public License","text":"EVENT UNLESS REQUIRED APPLICABLE LAW AGREED WRITING COPYRIGHT HOLDER, PARTY MODIFIES /CONVEYS PROGRAM PERMITTED , LIABLE DAMAGES, INCLUDING GENERAL, SPECIAL, INCIDENTAL CONSEQUENTIAL DAMAGES ARISING USE INABILITY USE PROGRAM (INCLUDING LIMITED LOSS DATA DATA RENDERED INACCURATE LOSSES SUSTAINED THIRD PARTIES FAILURE PROGRAM OPERATE PROGRAMS), EVEN HOLDER PARTY ADVISED POSSIBILITY DAMAGES.","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"id_17-interpretation-of-sections-15-and-16","dir":"","previous_headings":"TERMS AND CONDITIONS","what":"17. Interpretation of Sections 15 and 16","title":"GNU General Public License","text":"disclaimer warranty limitation liability provided given local legal effect according terms, reviewing courts shall apply local law closely approximates absolute waiver civil liability connection Program, unless warranty assumption liability accompanies copy Program return fee. END TERMS CONDITIONS","code":""},{"path":"https://serkor1.github.io/SLmetrics/LICENSE.html","id":"how-to-apply-these-terms-to-your-new-programs","dir":"","previous_headings":"","what":"How to Apply These Terms to Your New Programs","title":"GNU General Public License","text":"develop new program, want greatest possible use public, best way achieve make free software everyone can redistribute change terms. , attach following notices program. safest attach start source file effectively state exclusion warranty; file least “copyright” line pointer full notice found. Also add information contact electronic paper mail. program terminal interaction, make output short notice like starts interactive mode: hypothetical commands show w show c show appropriate parts General Public License. course, program’s commands might different; GUI interface, use “box”. also get employer (work programmer) school, , sign “copyright disclaimer” program, necessary. information , apply follow GNU GPL, see . GNU General Public License permit incorporating program proprietary programs. program subroutine library, may consider useful permit linking proprietary applications library. want , use GNU Lesser General Public License instead License. first, please read .","code":" Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'. 
This is free software, and you are welcome to redistribute it under certain conditions; type 'show c' for details."},{"path":"https://serkor1.github.io/SLmetrics/articles/SLmetrics.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"{SLmetrics}: Machine learning performance evaluation on steroids","text":"{SLmetrics} low-level R package supervised AI/ML performance evaluation. uses {Rcpp} {RcppEigen} backend memory efficient fast execution various metrics. {SLmetrics} follows syntax base R, uses S3-classes.","code":""},{"path":"https://serkor1.github.io/SLmetrics/articles/SLmetrics.html","id":"why","dir":"Articles","previous_headings":"","what":"Why?","title":"{SLmetrics}: Machine learning performance evaluation on steroids","text":"currently packages bridges gap R Python terms AI/ML performance evaluation; {MLmetrics}, {yardstick}, {mlr3measures} {metrica}. {MLmetrics} can considered legacy code comes performance evaluation, served backend {yardstick} version 0.0.2. built entirely base R, stable since inception almost 10 years ago. However, appears development reached ’s peak currently stale - see, example, stale PR related issue. Micro- macro-averages implemented {scikit-learn} many years, {MLmetrics} simply didn’t keep development. {yardstick}, hand, carried torch forward implemented modern features. {yardstick} closely follows syntax, naming functionality {scikit-learn} built {tidyverse} tools; although source code nice look , introduce serious overhead carries risk deprecations. Furthermore, complicates simple application verbose function naming, see example metric()-function metric_vec()-function - output , call different. {yardstick} can’t handle one positive class time, end-user forced run function get performance metrics adjacent classes. {SLmetrics}, name suggests, closely resembles {MLmetrics} simplicity, similarity ends. {SLmetrics} reflects simplicity application; comparing two vectors.
functionality features closely follows {scikit-learn} {pytorch} - significant edge two, alongside R packages, comes speed, efficiency user-friendliness; uses c++ backend, S3-classes frontend (See speed comparison)","code":""},{"path":"https://serkor1.github.io/SLmetrics/articles/SLmetrics.html","id":"basic-usage-classification","dir":"Articles","previous_headings":"","what":"Basic usage: classification","title":"{SLmetrics}: Machine learning performance evaluation on steroids","text":"","code":"# 1) recode iris # to binary problem iris$Species <- factor( x = as.numeric( iris$Species == \"virginica\" ), levels = c(1,0), labels = c(\"virginica\", \"others\") ) # 2) fit the logistic # regression model <- glm( formula = Species ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- as.factor( ifelse( predict(model, type = \"response\") > 0.5, yes = \"virginica\", no = \"others\" ) ) # 1) construct confusion # matrix confusion_matrix <- cmatrix( actual = iris$Species, predicted = predicted ) # 2) visualize # confusion matrix plot( confusion_matrix ) # 3) summarise # confusion matrix summary( confusion_matrix ) #> Confusion Matrix (2 x 2) #> ================================================================================ #> virginica others #> virginica 35 15 #> others 14 86 #> ================================================================================ #> Overall Statistics (micro average) #> - Accuracy: 0.81 #> - Balanced Accuracy: 0.78 #> - Sensitivity: 0.81 #> - Specificity: 0.81 #> - Precision: 0.81"},{"path":"https://serkor1.github.io/SLmetrics/articles/classification_problems.html","id":"setup","dir":"Articles","previous_headings":"","what":"Setup","title":"{SLmetrics}: Classification","text":"section setup essential workflow using {SLmetrics} {lightgbm}.","code":""},{"path":"https://serkor1.github.io/SLmetrics/articles/classification_problems.html","id":"the-data","dir":"Articles","previous_headings":"Setup","what":"The data","title":"{SLmetrics}: Classification","text":"","code":"# 1) load data # from {mlbench} data(\"Glass\", package = \"mlbench\") # 1.1) define the features # and outcomes outcome <- c(\"Type\") features <- setdiff(x = colnames(Glass), y = outcome) # 2) split data in training # and test # 2.1) set seed for # for reproducibility set.seed(1903) # 2.2) exttract # indices with a simple # 80/10 split index <- sample(1:nrow(Glass), size = 0.8 * nrow(Glass)) # 1.1) extract training # data and construct # as lgb.Dataset train <- Glass[index,] dtrain <- lightgbm::lgb.Dataset( data = data.matrix(train[,features]), label = train$Type ) # 1.2) extract test # data test <- Glass[-index,] # 1.2.1) extract actual # values and constuct # as.factor for {SLmetrics} # methods actual <- as.factor( test$Type ) # 1.2.2) construct as data.matrix # for predict method test <- data.matrix( test[,features] )"},{"path":"https://serkor1.github.io/SLmetrics/articles/classification_problems.html","id":"setting-up-parameters","dir":"Articles","previous_headings":"Setup","what":"Setting up parameters","title":"{SLmetrics}: Classification","text":"","code":"# 1) define parameters # across the vignette parameters <- list( objective = \"multiclass\", num_leaves = 4L, learning_rate = 0.5, num_class = 8 )"},{"path":"https://serkor1.github.io/SLmetrics/articles/classification_problems.html","id":"evaluation-function-f-score","dir":"Articles","previous_headings":"Setup","what":"Evaluation function: F score","title":"{SLmetrics}: 
Classification","text":"custom evaluation function use \\(F_{score}\\) \\(\\beta = 2\\) emphasize precision imporant recall. function defined , \\[ f_\\beta = (1 + \\beta^2) \\cdot \\frac{precision \\cdot recall}{(\\beta^2 \\cdot precision) + recall} \\] fbeta()-function returns vector scores class. want maximize micro-average.","code":"# 1) define the custom # evaluation metric evaluation_metric <- function( dtrain, preds) { # 1) extract values actual <- as.factor(dtrain) predicted <- lightgbm::get_field(preds, \"label\") value <- fbeta( actual = actual, predicted = predicted, beta = 2, micro = TRUE ) # 2) construnct output # list list( name = \"fbeta\", value = value, higher_better = TRUE ) }"},{"path":"https://serkor1.github.io/SLmetrics/articles/classification_problems.html","id":"training-model","dir":"Articles","previous_headings":"","what":"Training model","title":"{SLmetrics}: Classification","text":"train model using lgb.train()-function,","code":"model <- lightgbm::lgb.train( params = parameters, data = dtrain, nrounds = 10L, eval = evaluation_metric, verbose = -1 )"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/articles/classification_problems.html","id":"classification","dir":"Articles","previous_headings":"Performance Evaluation","what":"Classification","title":"{SLmetrics}: Classification","text":"extract predicted classes using predict()-function,","code":"# 1) prediction # from the model predicted <- as.factor( predict( model, newdata = test, type = \"class\" ) ) # 1) construct confusion # matrix confusion_matrix <- cmatrix( actual = actual, predicted = predicted ) # 2) visualize plot( confusion_matrix ) # 3) summarize summary( confusion_matrix ) #> Confusion Matrix (6 x 6) #> ================================================================================ #> 1 2 3 5 6 7 #> 1 13 2 1 0 0 0 #> 2 1 13 1 0 0 0 #> 3 0 0 2 0 0 0 #> 5 0 0 0 0 0 0 #> 6 0 0 0 1 2 0 #> 7 0 0 0 0 0 7 #> ================================================================================ #> Overall Statistics (micro average) #> - Accuracy: 0.86 #> - Balanced Accuracy: 0.87 #> - Sensitivity: 0.86 #> - Specificity: 0.97 #> - Precision: 0.86"},{"path":"https://serkor1.github.io/SLmetrics/articles/classification_problems.html","id":"response","dir":"Articles","previous_headings":"Performance Evaluation","what":"Response","title":"{SLmetrics}: Classification","text":"extract response values using predict()-function, response can passed ROC()-function, ROC()-function returns data.frame-object, 264 rows corresponding length response multiplied number classes data. 
roc-object can plotted follows, ROC()-function accepts custom threshold-argument, can passed follows, new object 264 rows.","code":"# 1) prediction # from the model response <- predict( model, newdata = test ) # 1) calculate the reciever # operator characteristics roc <- ROC( actual = actual, response = response ) # 2) print the roc # object print(roc) #> threshold level label fpr tpr #> 1 Inf 1 1 0.0000 0.0000 #> 2 3.60e-15 1 1 0.0370 0.0000 #> 3 2.70e-15 1 1 0.0370 0.0625 #> 4 1.46e-15 1 1 0.0741 0.0625 #> 5 1.16e-15 1 1 0.1111 0.0625 #> 6 1.15e-15 1 1 0.1481 0.0625 #> 7 1.12e-15 1 1 0.1852 0.0625 #> 8 1.03e-15 1 1 0.2222 0.0625 #> 9 9.87e-16 1 1 0.2222 0.1250 #> 10 9.45e-16 1 1 0.2593 0.1250 #> [ reached 'max' / getOption(\"max.print\") -- omitted 254 rows ] # 1) plot roc # object plot(roc) # 1) create custom # thresholds thresholds <- seq( from = 0.9, to = 0.1, length.out = 10 ) # 2) pass the custom thresholds # to the ROC()-function roc <- ROC( actual = actual, response = response, thresholds = thresholds ) # 3) print the roc # object print(roc) #> threshold level label fpr tpr #> 1 Inf 1 1 0.0000 0.0000 #> 2 3.60e-15 1 1 0.0370 0.0000 #> 3 2.70e-15 1 1 0.0370 0.0625 #> 4 1.46e-15 1 1 0.0741 0.0625 #> 5 1.16e-15 1 1 0.1111 0.0625 #> 6 1.15e-15 1 1 0.1481 0.0625 #> 7 1.12e-15 1 1 0.1852 0.0625 #> 8 1.03e-15 1 1 0.2222 0.0625 #> 9 9.87e-16 1 1 0.2222 0.1250 #> 10 9.45e-16 1 1 0.2593 0.1250 #> [ reached 'max' / getOption(\"max.print\") -- omitted 254 rows ] # 1) viasualize # ROC plot(roc) # 1) summarise ROC summary(roc) #> Reciever Operator Characteristics #> ================================================================================ #> AUC #> - 1: 0.414 #> - 2: 0.761 #> - 3: 0.524 #> - 5: 0 #> - 6: 0.846 #> - 7: 0.024"},{"path":"https://serkor1.github.io/SLmetrics/articles/regression_problems.html","id":"setup","dir":"Articles","previous_headings":"","what":"Setup","title":"{SLmetrics}: Regression","text":"section setup essential workflow using {SLmetrics} {xgboost}.","code":""},{"path":"https://serkor1.github.io/SLmetrics/articles/regression_problems.html","id":"the-data","dir":"Articles","previous_headings":"Setup","what":"The data","title":"{SLmetrics}: Regression","text":"","code":"# 1) load data # from {mlbench} data(\"BostonHousing\", package = \"mlbench\") # 1.1) define the features # and outcomes outcome <- c(\"medv\") features <- setdiff( x = colnames(BostonHousing), y = outcome ) # 2) split data in training # and test # 2.1) set seed for # for reproducibility set.seed(1903) # 2.2) exttract # indices with a simple # 90/10 split index <- sample(1:nrow(BostonHousing), size = 0.9 * nrow(BostonHousing)) # 1.1) extract training # data and construct # as lgb.Dataset train <- BostonHousing[index,] # 1.1.1) convert # to DMatrix dtrain <- xgboost::xgb.DMatrix( data = data.matrix(train[, features]), label = data.matrix(train[, outcome]) ) # 1.2) extract test # data test <- BostonHousing[-index,] # 1.2.1) convert to DMatrix dtest <- xgboost::xgb.DMatrix( data = data.matrix(test[, features]), label = data.matrix(test[, outcome]) ) # 1.2.2) extract actual # outcome actual <- test$medv"},{"path":"https://serkor1.github.io/SLmetrics/articles/regression_problems.html","id":"set-parameters","dir":"Articles","previous_headings":"Setup","what":"Set parameters","title":"{SLmetrics}: Regression","text":"","code":"# 1) define parameters # across the vignette parameters <- list( max_depth = 2, eta = 1 
)"},{"path":"https://serkor1.github.io/SLmetrics/articles/regression_problems.html","id":"evaluation-function-relative-root-mean-squared-error-rrmse","dir":"Articles","previous_headings":"Setup","what":"Evaluation function: Relative Root Mean Squared Error (RRMSE)","title":"{SLmetrics}: Regression","text":"function defined , \\[ \\text{RRMSE} = \\sqrt{\\frac{\\sum_{=1}^n (y_i - \\upsilon_i)^2}{\\sum_{=1}^n (y_i - \\bar{y})^2}} \\] \\(y_i\\) actual values, \\(\\upsilon_i\\) predicted values \\(\\bar{y}\\) mean \\(y\\).","code":"# 1) define the custom # evaluation metric evaluation_metric <- function( preds, dtrain) { # 1) extract values actual <- xgboost::getinfo(dtrain, \"label\") predicted <- preds value <- rrse( actual = actual, predicted = predicted ) # 2) construnct output # list list( metric = \"RRMSE\", value = value ) }"},{"path":"https://serkor1.github.io/SLmetrics/articles/regression_problems.html","id":"training-model","dir":"Articles","previous_headings":"","what":"Training model","title":"{SLmetrics}: Regression","text":"train model using xgb.train()-function,","code":"# 1) model training model <- xgboost::xgb.train( params = parameters, data = dtrain, nrounds = 10L, verbose = 0, feval = evaluation_metric, watchlist = list( train = dtrain, test = dtest ), maximize = FALSE )"},{"path":"https://serkor1.github.io/SLmetrics/articles/regression_problems.html","id":"performance-evaluation","dir":"Articles","previous_headings":"","what":"Performance Evaluation","title":"{SLmetrics}: Regression","text":"extract predicted values using predict()-function, summarize performance using relative root mean squared error, root mean squared error concordance correlation coefficient","code":"# 1) out of sample # prediction predicted <- predict( model, newdata = dtest ) # 1) summarize all # performance measures # in data.frame data.frame( RRMSE = rrse(actual, predicted), RMSE = rmse(actual, predicted), CCC = ccc(actual, predicted) ) #> RRMSE RMSE CCC #> 1 0.4578544 3.705342 0.8840932"},{"path":"https://serkor1.github.io/SLmetrics/authors.html","id":null,"dir":"","previous_headings":"","what":"Authors","title":"Authors and Citation","text":"Serkan Korkmaz. Maintainer, author, copyright holder.","code":""},{"path":"https://serkor1.github.io/SLmetrics/authors.html","id":"citation","dir":"","previous_headings":"","what":"Citation","title":"Authors and Citation","text":"Korkmaz S (2025). SLmetrics: Machine Learning Performance Evaluation Steroids. R package version 0.3-1, https://serkor1.github.io/SLmetrics/.","code":"@Manual{, title = {SLmetrics: Machine Learning Performance Evaluation on Steroids}, author = {Serkan Korkmaz}, year = {2025}, note = {R package version 0.3-1}, url = {https://serkor1.github.io/SLmetrics/}, }"},{"path":"https://serkor1.github.io/SLmetrics/index.html","id":"slmetrics-aiml-performance-evaluation-","dir":"","previous_headings":"","what":"Performance Evaluation","title":"Performance Evaluation","text":"{SLmetrics} low-level R package supervised AI/ML performance evaluation. uses {Rcpp} {RcppEigen} backend memory efficient fast execution various metrics. 
{SLmetrics} follows syntax base R, uses S3-classes.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/index.html","id":"shield-stable-version","dir":"","previous_headings":":information_source: Installation","what":"🛡️ Stable version","title":"Performance Evaluation","text":"","code":"## install stable release devtools::install_github( repo = 'https://github.com/serkor1/SLmetrics@*release', ref = 'main' )"},{"path":"https://serkor1.github.io/SLmetrics/index.html","id":"hammer_and_wrench-development-version","dir":"","previous_headings":":information_source: Installation","what":":hammer_and_wrench: Development version","title":"Performance Evaluation","text":"","code":"## install development version devtools::install_github( repo = 'https://github.com/serkor1/SLmetrics', ref = 'development' )"},{"path":"https://serkor1.github.io/SLmetrics/index.html","id":"information_source-code-of-conduct","dir":"","previous_headings":"","what":":information_source: Code of Conduct","title":"Performance Evaluation","text":"Please note {SLmetrics} project released Contributor Code Conduct. contributing project, agree abide terms.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ROC.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","text":"ROC()-function computes tpr() fpr() thresholds provided \\(response\\)- \\(thresholds\\)-vector. function constructs data.frame() grouped \\(k\\)-classes class treated binary classification problem.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ROC.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","text":"","code":"# S3 method for class 'factor' ROC(actual, response, thresholds = NULL, ...) # S3 method for class 'factor' weighted.ROC(actual, response, w, thresholds = NULL, ...) ROC(...) weighted.ROC(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/ROC.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. response -vector length \\(n\\). estimated response probabilities. thresholds optional -vector non-zero length (default: NULL). ... Arguments passed methods. w -vector length \\(n\\). NULL default.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ROC.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","text":"data.frame following form, threshold Thresholds used determine tpr() fpr() level level actual label levels actual fpr false positive rate tpr true positve rate","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ROC.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. 
Now, suppose model predict B's. predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":"https://serkor1.github.io/SLmetrics/reference/ROC.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#TN_k}{\\#TN_k+\\#FP_k} $$ \\(\\#TN_k\\) \\(\\#FP_k\\) number true negatives false positives, respectively, class \\(k\\).","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/ROC.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — ROC.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes response <-predict(model, type = \"response\") # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) generate reciever # operator characteristics roc <- ROC( actual = actual, response = response ) # 5) plot by species plot(roc) # 5.1) summarise summary(roc) #> Reciever Operator Characteristics #> ================================================================================ #> AUC #> - Others: 0.114 #> - Virginica: 0.887 # 6) provide custom # threholds roc <- ROC( actual = actual, response = response, thresholds = seq(0, 1, length.out = 4) ) # 5) plot by species plot(roc)"},{"path":"https://serkor1.github.io/SLmetrics/reference/SLmetrics-package.html","id":null,"dir":"Reference","previous_headings":"","what":"SLmetrics: Machine Learning Performance Evaluation on Steroids — SLmetrics-package","title":"SLmetrics: Machine Learning Performance Evaluation on Steroids — SLmetrics-package","text":"{SLmetrics} lightweight package written C++ supervised unsupervised Machine Learning applications. package developed two primary goals mind: memory management execution speed. functions designed internal pointers references, ensuring passed objects copied memory, resulting optimized performance.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/SLmetrics-package.html","id":"handling-of-missing-values","dir":"Reference","previous_headings":"","what":"Handling of Missing Values","title":"SLmetrics: Machine Learning Performance Evaluation on Steroids — SLmetrics-package","text":"{SLmetrics} provide explicit handling missing values either regression classification applications. Users advised ensure input data preprocessed remove impute missing values passing functions. Since package heavily relies pointers references performance, passing data missing values may lead undefined behavior, including potential crashes R session. 
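Because the note above leaves missing-value handling entirely to the user, a small sketch of the implied preprocessing step may help; the toy vectors and the keep index are illustrative assumptions, while accuracy() is the {SLmetrics} generic documented further below.

# Sketch: drop incomplete observation pairs before calling any {SLmetrics} metric,
# since the package itself performs no NA handling.
actual    <- factor(c("A", "B", NA,  "B", "A"), levels = c("A", "B"))
predicted <- factor(c("A", "B", "A", NA,  "A"), levels = c("A", "B"))

# keep only rows where both the actual and predicted value are present
keep <- complete.cases(actual, predicted)

SLmetrics::accuracy(
  actual    = actual[keep],
  predicted = predicted[keep]
)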
classification metrics support micro macro averages, {SLmetrics} handle invalid values divisions zero, ensuring robust computation accurate results.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/SLmetrics-package.html","id":"author","dir":"Reference","previous_headings":"","what":"Author","title":"SLmetrics: Machine Learning Performance Evaluation on Steroids — SLmetrics-package","text":"Maintainer: Serkan Korkmaz serkor1@duck.com (ORCID) [copyright holder]","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/accuracy.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","title":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","text":"accuracy() function computes accuracy two vectors predicted observed factor() values. weighted.accuracy() function computes weighted accuracy.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/accuracy.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","text":"","code":"# S3 method for class 'factor' accuracy(actual, predicted, ...) # S3 method for class 'factor' weighted.accuracy(actual, predicted, w, ...) # S3 method for class 'cmatrix' accuracy(x, ...) accuracy(...) weighted.accuracy(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/accuracy.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","text":"actual vector - length \\(n\\), \\(k\\) levels predicted vector -vector length \\(n\\), \\(k\\) levels ... Arguments passed methods w -vector length \\(n\\). NULL default x confusion matrix created cmatrix()","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/accuracy.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","text":"-vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/accuracy.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","text":"metric calculated follows, $$ \\frac{\\#TP + \\#TN}{\\#TP + \\#TN + \\#FP + \\#FN} $$ \\(\\#TP\\), \\(\\#TN\\), \\(\\#FP\\), \\(\\#FN\\) number true positives, true negatives, false positives, false negatives, respectively.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/accuracy.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/accuracy.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{accuracy}\\) — accuracy.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate model # performance cat( \"Accuracy\", accuracy( actual = actual, predicted = predicted ), \"Accuracy (weigthed)\", weighted.accuracy( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ), sep = \"\\n\" ) #> Accuracy #> 0.8066667 #> Accuracy (weigthed) #> 0.7488026"},{"path":"https://serkor1.github.io/SLmetrics/reference/baccuracy.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","title":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","text":"baccuracy()-function computes balanced accuracy two vectors predicted observed factor() values. weighted.baccuracy() function computes weighted balanced accuracy.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/baccuracy.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","text":"","code":"# S3 method for class 'factor' baccuracy(actual, predicted, adjust = FALSE, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.baccuracy(actual, predicted, w, adjust = FALSE, na.rm = TRUE, ...) # S3 method for class 'cmatrix' baccuracy(x, adjust = FALSE, na.rm = TRUE, ...) baccuracy(...) weighted.baccuracy(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/baccuracy.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","text":"actual vector - length \\(n\\), \\(k\\) levels predicted vector -vector length \\(n\\), \\(k\\) levels adjust logical value (default: FALSE). TRUE metric adjusted random chance \\(\\frac{1}{k}\\). na.rm logical values (default: TRUE). TRUE calculation metric based valid classes. ... Arguments passed methods w -vector length \\(n\\). 
NULL default x confusion matrix created cmatrix()","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/baccuracy.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","text":"numeric-vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/baccuracy.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","text":"metric calculated follows, $$ \\frac{\\text{sensitivity} + \\text{specificty}}{2} $$ See sensitivity()- /specificity()-function details.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/baccuracy.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/baccuracy.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{balanced}\\) \\(\\text{accuracy}\\) — baccuracy.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate the # model cat( \"Balanced accuracy\", baccuracy( actual = actual, predicted = predicted ), \"Balanced accuracy (weigthed)\", weighted.baccuracy( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ), sep = \"\\n\" ) #> Balanced accuracy #> 0.78 #> Balanced accuracy (weigthed) #> 0.748419"},{"path":"https://serkor1.github.io/SLmetrics/reference/ccc.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{concordance}\\) \\(\\text{correlation}\\) \\(\\text{coefficient}\\) — ccc.numeric","title":"Compute the \\(\\text{concordance}\\) \\(\\text{correlation}\\) \\(\\text{coefficient}\\) — ccc.numeric","text":"ccc()-function computes simple weighted concordance correlation coefficient two vectors predicted observed values. weighted.ccc() function computes weighted Concordance Correlation Coefficient. 
correction TRUE \\(\\sigma^2\\) adjusted \\(\\frac{1-n}{n}\\) intermediate steps.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ccc.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{concordance}\\) \\(\\text{correlation}\\) \\(\\text{coefficient}\\) — ccc.numeric","text":"","code":"# S3 method for class 'numeric' ccc(actual, predicted, correction = FALSE, ...) # S3 method for class 'numeric' weighted.ccc(actual, predicted, w, correction = FALSE, ...) ccc(...) weighted.ccc(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/ccc.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{concordance}\\) \\(\\text{correlation}\\) \\(\\text{coefficient}\\) — ccc.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. correction vector length \\(1\\) (default: FALSE). TRUE variance covariance adjusted \\(\\frac{1-n}{n}\\) ... Arguments passed methods. w -vector length \\(n\\). weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ccc.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{concordance}\\) \\(\\text{correlation}\\) \\(\\text{coefficient}\\) — ccc.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ccc.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{concordance}\\) \\(\\text{correlation}\\) \\(\\text{coefficient}\\) — ccc.numeric","text":"metric calculated follows, $$ \\rho_c = \\frac{2 \\rho \\sigma_x \\sigma_y}{\\sigma_x^2 + \\sigma_y^2 + (\\mu_x - \\mu_y)^2} $$ \\(\\rho\\) \\(\\text{pearson correlation coefficient}\\), \\(\\sigma\\) \\(\\text{standard deviation}\\) \\(\\mu\\) simple mean actual predicted.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/ccc.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{concordance}\\) \\(\\text{correlation}\\) \\(\\text{coefficient}\\) — ccc.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance cat( \"Concordance Correlation Coefficient\", ccc( actual = actual, predicted = predicted, correction = FALSE ), \"Concordance Correlation Coefficient (corrected)\", ccc( actual = actual, predicted = predicted, correction = TRUE ), \"Concordance Correlation Coefficient (weighted)\", weighted.ccc( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg), correction = FALSE ), sep = \"\\n\" ) #> Concordance Correlation Coefficient #> 0.9299181 #> Concordance Correlation Coefficient (corrected) #> 0.9299181 #> Concordance Correlation Coefficient (weighted) #> 0.9238419"},{"path":"https://serkor1.github.io/SLmetrics/reference/ckappa.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","title":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","text":"ckappa()-function computes Cohen's \\(\\kappa\\), statistic measures inter-rater agreement categorical items two vectors predicted observed factor() values. 
weighted.ckappa() function computes weighted \\(\\kappa\\)-statistic. \\(\\beta \\neq 0\\) -diagonals confusion matrix penalized factor \\((y_{+} - y_{,-})^\\beta\\). See details.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ckappa.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","text":"","code":"# S3 method for class 'factor' ckappa(actual, predicted, beta = 0, ...) # S3 method for class 'factor' weighted.ckappa(actual, predicted, w, beta = 0, ...) # S3 method for class 'cmatrix' ckappa(x, beta = 0, ...) ckappa(...) weighted.ckappa(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/ckappa.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. beta value length 1 (default: 0). set value different zero, -diagonal confusion matrix penalized. ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ckappa.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ckappa.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","text":"$$ \\frac{\\rho_p - \\rho_e}{1-\\rho_e} $$ \\(\\rho_p\\) empirical probability agreement predicted actual values, \\(\\rho_e\\) expected probability agreement random chance.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/ckappa.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/ckappa.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute Cohen's \\(\\kappa\\)-statistic — ckappa.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate model performance with # Cohen's Kappa statistic cat( \"Kappa\", ckappa( actual = actual, predicted = predicted ), \"Kappa (penalized)\", ckappa( actual = actual, predicted = predicted, beta = 2 ), \"Kappa (weighted)\", weighted.ckappa( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ), sep = \"\\n\" ) #> Kappa #> 0.5628141 #> Kappa (penalized) #> 0.5628141 #> Kappa (weighted) #> 0.4971626"},{"path":"https://serkor1.github.io/SLmetrics/reference/cmatrix.html","id":null,"dir":"Reference","previous_headings":"","what":"Confusion Matrix — cmatrix.factor","title":"Confusion Matrix — cmatrix.factor","text":"cmatrix()-function uses cross-classifying factors build confusion matrix counts combination factor levels. row matrix represents actual factor levels, column represents predicted factor levels.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/cmatrix.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Confusion Matrix — cmatrix.factor","text":"","code":"# S3 method for class 'factor' cmatrix(actual, predicted, ...) # S3 method for class 'factor' weighted.cmatrix(actual, predicted, w, ...) cmatrix(...) weighted.cmatrix(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/cmatrix.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Confusion Matrix — cmatrix.factor","text":"actual -vector length \\(n\\), \\(k\\) levels. predicted -vector length \\(n\\), \\(k\\) levels. ... Arguments passed methods. w -vector length \\(n\\) (default: NULL) passed return weighted confusion matrix.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/cmatrix.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Confusion Matrix — cmatrix.factor","text":"named \\(k\\) x \\(k\\) class","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/cmatrix.html","id":"dimensions","dir":"Reference","previous_headings":"","what":"Dimensions","title":"Confusion Matrix — cmatrix.factor","text":"robust defensive measure misspecifying confusion matrix. 
arguments correctly specified, resulting confusion matrix form:","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/cmatrix.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Confusion Matrix — cmatrix.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) summarise performance # in a confusion matrix # 4.1) unweighted matrix confusion_matrix <- cmatrix( actual = actual, predicted = predicted ) # 4.1.1) summarise matrix summary( confusion_matrix ) #> Confusion Matrix (2 x 2) #> ================================================================================ #> Virginica Others #> Virginica 35 15 #> Others 14 86 #> ================================================================================ #> Overall Statistics (micro average) #> - Accuracy: 0.81 #> - Balanced Accuracy: 0.78 #> - Sensitivity: 0.81 #> - Specificity: 0.81 #> - Precision: 0.81 # 4.1.2) plot confusion # matrix plot( confusion_matrix ) # 4.2) weighted matrix confusion_matrix <- weighted.cmatrix( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) # 4.2.1) summarise matrix summary( confusion_matrix ) #> Confusion Matrix (2 x 2) #> ================================================================================ #> Virginica Others #> Virginica 53.40607 20.46301 #> Others 17.21660 58.91432 #> ================================================================================ #> Overall Statistics (micro average) #> - Accuracy: 0.75 #> - Balanced Accuracy: 0.75 #> - Sensitivity: 0.75 #> - Specificity: 0.75 #> - Precision: 0.75 # 4.2.1) plot confusion # matrix plot( confusion_matrix )"},{"path":"https://serkor1.github.io/SLmetrics/reference/dor.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","title":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","text":"dor()-function computes Diagnostic Odds Ratio (DOR), single indicator test performance, two vectors predicted observed factor() values. weighted.dor() function computes weighted diagnostic odds ratio. aggregate = TRUE, function returns micro-average DOR across classes \\(k\\). default, returns class-wise DOR.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/dor.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","text":"","code":"# S3 method for class 'factor' dor(actual, predicted, ...) # S3 method for class 'factor' weighted.dor(actual, predicted, w, ...) # S3 method for class 'cmatrix' dor(x, ...) dor(...) 
weighted.dor(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/dor.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/dor.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/dor.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","text":"metric calculated class \\(k\\) follows, $$ \\text{DOR}_k = \\frac{\\text{PLR}_k}{\\text{NLR}_k} $$ \\(\\text{PLR}_k\\) \\(\\text{NLR}_k\\) positive negative likelihood ratio class \\(k\\), respectively. See plr() nlr() details. aggregate = TRUE, micro-average calculated , $$ \\overline{\\text{DOR}} = \\frac{\\overline{\\text{PLR}_k}}{\\overline{\\text{NLR}_k}} $$ \\(\\overline{\\text{PLR}}\\) \\(\\overline{\\text{NLR}}\\) micro-averaged positive negative likelihood ratio, respectively.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/dor.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/dor.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{diagnostic}\\) \\(\\text{odds}\\) \\(\\text{ratio}\\) — dor.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate model performance # with Diagnostic Odds Ratio cat(\"Diagnostic Odds Ratio\", sep = \"\\n\") #> Diagnostic Odds Ratio dor( actual = actual, predicted = predicted ) #> [1] 14.33333 14.33333 cat(\"Diagnostic Odds Ratio (weighted)\", sep = \"\\n\") #> Diagnostic Odds Ratio (weighted) weighted.dor( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> [1] 8.930882 8.930882"},{"path":"https://serkor1.github.io/SLmetrics/reference/entropy.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the Entropy — entropy.matrix","title":"Compute the Entropy — entropy.matrix","text":"entropy() function calculates Entropy given probability distributions.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/entropy.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the Entropy — entropy.matrix","text":"","code":"# S3 method for class 'matrix' entropy(pk, axis = 0L, base = -1, ...) entropy(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/entropy.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the Entropy — entropy.matrix","text":"pk \\(n \\times k\\) -matrix predicted probabilities. \\(\\)-th row sum 1 (.e., valid probability distribution \\(k\\) classes). first column corresponds first factor level actual, second column second factor level, . axis value length 1 (Default: 0). Defines dimensions calculate entropy. 0: Total entropy, 1: row-wise, 2: column-wise base value length 1 (Default: -1). logarithmic base use. Default value specifies natural logarithms. ... Arguments passed methods","code":""},{"path":[]},{"path":[]},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/fbeta.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","title":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","text":"fbeta()-function computes \\(F_\\beta\\) score, weighted harmonic mean precision() recall(), two vectors predicted observed factor() values. parameter \\(\\beta\\) determines weight precision recall combined score. 
weighted.fbeta() function computes weighted \\(F_\\beta\\) score.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fbeta.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","text":"","code":"# S3 method for class 'factor' fbeta(actual, predicted, beta = 1, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.fbeta(actual, predicted, w, beta = 1, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' fbeta(x, beta = 1, micro = NULL, na.rm = TRUE, ...) fbeta(...) weighted.fbeta(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/fbeta.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. beta vector length \\(1\\) (default: \\(1\\)). micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fbeta.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fbeta.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","text":"metric calculated class \\(k\\) follows, $$ (1 + \\beta^2) \\frac{\\text{Precision}_k \\cdot \\text{Recall}_k}{(\\beta^2 \\cdot \\text{Precision}_k) + \\text{Recall}_k} $$ precision \\(\\frac{\\#TP_k}{\\#TP_k + \\#FP_k}\\) recall (sensitivity) \\(\\frac{\\#TP_k}{\\#TP_k + \\#FN_k}\\), \\(\\beta\\) determines weight precision relative recall.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fbeta.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/fbeta.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(F_{\\beta}\\)-score — fbeta.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using F1-score # 4.1) unweighted F1-score fbeta( actual = actual, predicted = predicted, beta = 1 ) #> Virginica Others #> 0.7070707 0.8557214 # 4.2) weighted F1-score weighted.fbeta( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), beta = 1 ) #> Virginica Others #> 0.7392265 0.7577002 # 5) evaluate overall performance # using micro-averaged F1-score cat( \"Micro-averaged F1-score\", fbeta( actual = actual, predicted = predicted, beta = 1, micro = TRUE ), \"Micro-averaged F1-score (weighted)\", weighted.fbeta( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), beta = 1, micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged F1-score #> 0.8066667 #> Micro-averaged F1-score (weighted) #> 0.7488026"},{"path":"https://serkor1.github.io/SLmetrics/reference/fdr.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","title":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","text":"fdr()-function computes false discovery rate (FDR), proportion false positives among predicted positives, two vectors predicted observed factor() values. weighted.fdr() function computes weighted false discovery rate.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fdr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","text":"","code":"# S3 method for class 'factor' fdr(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.fdr(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' fdr(x, micro = NULL, na.rm = TRUE, ...) fdr(...) weighted.fdr(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/fdr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. 
predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fdr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fdr.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#FP_k}{\\#TP_k+\\#FP_k} $$ \\(\\#TP_k\\) \\(\\#FP_k\\) number true positives false positives, respectively, class \\(k\\).","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fdr.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/fdr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{false}\\) \\(\\text{discovery}\\) \\(\\text{rate}\\) — fdr.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using False Discovery Rate # 4.1) unweighted False Discovery Rate fdr( actual = actual, predicted = predicted ) #> Virginica Others #> 0.2857143 0.1485149 # 4.2) weighted False Discovery Rate weighted.fdr( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.2437830 0.2577942 # 5) evaluate overall performance # using micro-averaged False Discovery Rate cat( \"Micro-averaged False Discovery Rate\", fdr( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged False Discovery Rate (weighted)\", weighted.fdr( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged False Discovery Rate #> 0.1933333 #> Micro-averaged False Discovery Rate (weighted) #> 0.2511974"},{"path":"https://serkor1.github.io/SLmetrics/reference/fer.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","title":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","text":"fer()-function computes false omission rate (), proportion false negatives among predicted negatives, two vectors predicted observed factor() values. weighted.fer() function computes weighted false omission rate.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fer.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","text":"","code":"# S3 method for class 'factor' fer(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.fer(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' fer(x, micro = NULL, na.rm = TRUE, ...) fer(...) 
weighted.fer(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/fer.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fer.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fer.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#FN_k}{\\#FN_k + \\#TN_k} $$ \\(\\#FN_k\\) \\(\\#TN_k\\) number false negatives true negatives, respectively, class \\(k\\).","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fer.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/fer.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{false}\\) \\(\\text{omission}\\) \\(\\text{rate}\\) — fer.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using False Omission Rate # 4.1) unweighted False Omission Rate fer( actual = actual, predicted = predicted ) #> Virginica Others #> 0.1485149 0.2857143 # 4.2) weighted False Omission Rate weighted.fer( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.2577942 0.2437830 # 5) evaluate overall performance # using micro-averaged False Omission Rate cat( \"Micro-averaged False Omission Rate\", fer( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged False Omission Rate (weighted)\", weighted.fer( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged False Omission Rate #> 0.1933333 #> Micro-averaged False Omission Rate (weighted) #> 0.2511974"},{"path":"https://serkor1.github.io/SLmetrics/reference/fmi.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","title":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","text":"fmi()-function computes Fowlkes-Mallows Index (FMI), measure similarity two sets clusterings, two vectors predicted observed factor() values.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fmi.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","text":"","code":"# S3 method for class 'factor' fmi(actual, predicted, ...) # S3 method for class 'cmatrix' fmi(x, ...) fmi(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/fmi.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","text":"actual vector - length \\(n\\), \\(k\\) levels predicted vector -vector length \\(n\\), \\(k\\) levels ... 
Arguments passed methods x confusion matrix created cmatrix()","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fmi.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","text":"-vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fmi.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","text":"metric calculated class \\(k\\) follows, $$ \\sqrt{\\frac{\\#TP_k}{\\#TP_k + \\#FP_k} \\times \\frac{\\#TP_k}{\\#TP_k + \\#FN_k}} $$ \\(\\#TP_k\\), \\(\\#FP_k\\), \\(\\#FN_k\\) represent number true positives, false positives, false negatives class \\(k\\), respectively.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fmi.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/fmi.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{fowlkes}\\)-\\(\\text{fallows}\\) \\(\\text{index}\\) — fmi.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate model performance # using Fowlkes Mallows Index cat( \"Fowlkes Mallows Index\", fmi( actual = actual, predicted = predicted ), sep = \"\\n\" ) #> Fowlkes Mallows Index #> 0.717045"},{"path":"https://serkor1.github.io/SLmetrics/reference/fpr.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","title":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","text":"fpr()-function computes False Positive Rate (FPR), also known fall-(fallout()), two vectors predicted observed factor() values. 
weighted.fpr() function computes weighted false positive rate.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fpr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","text":"","code":"# S3 method for class 'factor' fpr(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.fpr(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' fpr(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' fallout(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.fallout(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' fallout(x, micro = NULL, na.rm = TRUE, ...) fpr(...) fallout(...) weighted.fpr(...) weighted.fallout(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/fpr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fpr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fpr.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#FP_k}{\\#FP_k + \\#TN_k} $$ \\(\\#FP_k\\) \\(\\#TN_k\\) represent number false positives true negatives, respectively, class \\(k\\).","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/fpr.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/fpr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{false}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — fpr.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using False Positive Rate # 4.1) unweighted False Positive Rate fpr( actual = actual, predicted = predicted ) #> Virginica Others #> 0.14 0.30 # 4.2) weighted False Positive Rate weighted.fpr( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.2261447 0.2770173 # 5) evaluate overall performance # using micro-averaged False Positive Rate cat( \"Micro-averaged False Positive Rate\", fpr( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged False Positive Rate (weighted)\", weighted.fpr( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged False Positive Rate #> 0.1933333 #> Micro-averaged False Positive Rate (weighted) #> 0.2511974"},{"path":"https://serkor1.github.io/SLmetrics/reference/huberloss.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{huber}\\) \\(\\text{loss}\\) — huberloss.numeric","title":"Compute the \\(\\text{huber}\\) \\(\\text{loss}\\) — huberloss.numeric","text":"huberloss()-function computes simple weighted huber loss predicted observed vectors. weighted.huberloss() function computes weighted Huber Loss.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/huberloss.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{huber}\\) \\(\\text{loss}\\) — huberloss.numeric","text":"","code":"# S3 method for class 'numeric' huberloss(actual, predicted, delta = 1, ...) # S3 method for class 'numeric' weighted.huberloss(actual, predicted, w, delta = 1, ...) huberloss(...) weighted.huberloss(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/huberloss.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{huber}\\) \\(\\text{loss}\\) — huberloss.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. delta -vector length \\(1\\) (default: \\(1\\)). 
threshold value switch functions (see calculation). ... Arguments passed methods. w -vector length \\(n\\). weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/huberloss.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{huber}\\) \\(\\text{loss}\\) — huberloss.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/huberloss.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{huber}\\) \\(\\text{loss}\\) — huberloss.numeric","text":"metric calculated follows, $$ \\frac{1}{2} (y - \\upsilon)^2 ~~ |y - \\upsilon| \\leq \\delta $$ $$ \\delta |y-\\upsilon|-\\frac{1}{2} \\delta^2 ~~ \\text{otherwise} $$ \\(y\\) \\(\\upsilon\\) actual predicted values respectively. w NULL, values aggregated using weights.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/huberloss.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{huber}\\) \\(\\text{loss}\\) — huberloss.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) calculate the metric # with delta 0.5 huberloss( actual = actual, predicted = predicted, delta = 0.5 ) #> [1] 0.7503286 # 3) calculate weighted # metric using arbitrary weights w <- rbeta( n = 1e3, shape1 = 10, shape2 = 2 ) huberloss( actual = actual, predicted = predicted, delta = 0.5, w = w ) #> [1] 0.7503286"},{"path":"https://serkor1.github.io/SLmetrics/reference/jaccard.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","title":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","text":"jaccard()-function computes Jaccard Index, also known Intersection Union, two vectors predicted observed factor() values. weighted.jaccard() function computes weighted Jaccard Index.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/jaccard.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","text":"","code":"# S3 method for class 'factor' jaccard(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.jaccard(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' jaccard(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' csi(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.csi(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' csi(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' tscore(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.tscore(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' tscore(x, micro = NULL, na.rm = TRUE, ...) jaccard(...) csi(...) tscore(...) weighted.jaccard(...) weighted.csi(...) 
weighted.tscore(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/jaccard.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/jaccard.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/jaccard.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#TP_k}{\\#TP_k + \\#FP_k + \\#FN_k} $$ \\(\\#TP_k\\), \\(\\#FP_k\\), \\(\\#FN_k\\) represent number true positives, false positives, false negatives class \\(k\\), respectively.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/jaccard.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/jaccard.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{Jaccard}\\) \\(\\text{index}\\) — jaccard.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using Jaccard Index # 4.1) unweighted Jaccard Index jaccard( actual = actual, predicted = predicted ) #> Virginica Others #> 0.5468750 0.7478261 # 4.2) weighted Jaccard Index weighted.jaccard( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.5863278 0.6099174 # 5) evaluate overall performance # using micro-averaged Jaccard Index cat( \"Micro-averaged Jaccard Index\", jaccard( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged Jaccard Index (weighted)\", weighted.jaccard( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged Jaccard Index #> 0.6759777 #> Micro-averaged Jaccard Index (weighted) #> 0.5984687"},{"path":"https://serkor1.github.io/SLmetrics/reference/logloss.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the Log Loss — logloss.factor","title":"Compute the Log Loss — logloss.factor","text":"logloss() function computes Log Loss observed classes () predicted probability distributions ( matrix). weighted.logloss() function weighted version, applying observation-specific weights.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/logloss.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the Log Loss — logloss.factor","text":"","code":"# S3 method for class 'factor' logloss(actual, response, normalize = TRUE, ...) # S3 method for class 'factor' weighted.logloss(actual, response, w, normalize = TRUE, ...) logloss(...) weighted.logloss(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/logloss.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the Log Loss — logloss.factor","text":"actual vector - length \\(n\\), \\(k\\) levels response \\(n \\times k\\) -matrix predicted probabilities. \\(\\)-th row sum 1 (.e., valid probability distribution \\(k\\) classes). first column corresponds first factor level actual, second column second factor level, . normalize -value (default: TRUE). 
TRUE, mean cross-entropy across observations returned; otherwise, sum cross-entropies returned. ... Arguments passed methods w -vector length \\(n\\). NULL default","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/logloss.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the Log Loss — logloss.factor","text":"-vector length 1","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/logloss.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the Log Loss — logloss.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/logloss.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the Log Loss — logloss.factor","text":"","code":"# 1) Recode the iris data set to a binary classification problem # Here, the positive class (\"Virginica\") is coded as 1, # and the rest (\"Others\") is coded as 0. iris$species_num <- as.numeric(iris$Species == \"virginica\") # 2) Fit a logistic regression model predicting species_num from Sepal.Length & Sepal.Width model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial(link = \"logit\") ) # 3) Generate predicted classes: \"Virginica\" vs. \"Others\" predicted <- factor( as.numeric(predict(model, type = \"response\") > 0.5), levels = c(1, 0), labels = c(\"Virginica\", \"Others\") ) # 3.1) Generate actual classes actual <- factor( x = iris$species_num, levels = c(1, 0), labels = c(\"Virginica\", \"Others\") ) # For Log Loss, we need predicted probabilities for each class. # Since it's a binary model, we create a 2-column matrix: # 1st column = P(\"Virginica\") # 2nd column = P(\"Others\") = 1 - P(\"Virginica\") predicted_probs <- predict(model, type = \"response\") response_matrix <- cbind(predicted_probs, 1 - predicted_probs) # 4) Evaluate unweighted Log Loss # 'logloss' takes (actual, response_matrix, normalize=TRUE/FALSE). # The factor 'actual' must have the positive class (Virginica) as its first level. 
unweighted_LogLoss <- logloss( actual = actual, # factor response = response_matrix, # numeric matrix of probabilities normalize = TRUE # normalize = TRUE ) # 5) Evaluate weighted Log Loss # We introduce a weight vector, for example: weights <- iris$Petal.Length / mean(iris$Petal.Length) weighted_LogLoss <- weighted.logloss( actual = actual, response = response_matrix, w = weights, normalize = TRUE ) # 6) Print Results cat( \"Unweighted Log Loss:\", unweighted_LogLoss, \"Weighted Log Loss:\", weighted_LogLoss, sep = \"\\n\" ) #> Unweighted Log Loss: #> 0.3863304 #> Weighted Log Loss: #> 0.491474"},{"path":"https://serkor1.github.io/SLmetrics/reference/mae.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — mae.numeric","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — mae.numeric","text":"mae()-function computes mean absolute error observed predicted vectors. weighted.mae() function computes weighted mean absolute error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mae.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — mae.numeric","text":"","code":"# S3 method for class 'numeric' mae(actual, predicted, ...) # S3 method for class 'numeric' weighted.mae(actual, predicted, w, ...) mae(...) weighted.mae(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/mae.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — mae.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. w -vector length \\(n\\). 
weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mae.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — mae.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mae.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — mae.numeric","text":"metric calulated follows, $$ \\frac{\\sum_i^n |y_i - \\upsilon_i|}{n} $$","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/mae.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — mae.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Mean Absolute Error (MAE) cat( \"Mean Absolute Error\", mae( actual = actual, predicted = predicted, ), \"Mean Absolute Error (weighted)\", weighted.mae( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Mean Absolute Error #> 1.72274 #> Mean Absolute Error (weighted) #> 1.849613"},{"path":"https://serkor1.github.io/SLmetrics/reference/mape.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mape.numeric","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mape.numeric","text":"mape()-function computes mean absolute percentage error observed predicted vectors. weighted.mape() function computes weighted mean absolute percentage error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mape.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mape.numeric","text":"","code":"# S3 method for class 'numeric' mape(actual, predicted, ...) # S3 method for class 'numeric' weighted.mape(actual, predicted, w, ...) mape(...) weighted.mape(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/mape.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mape.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. w -vector length \\(n\\). 
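Tying back to the mae() example above: the unweighted value is just the average absolute residual. The weighted line below is a sketch of one common weighting scheme (a weight-normalised mean) and is an assumption about the implementation, not taken from the documentation; both lines assume the numeric `actual` and `predicted` vectors from the mtcars example.

mean(abs(actual - predicted))                 # unweighted MAE, cf. the value above
w <- mtcars$mpg / mean(mtcars$mpg)
sum(w * abs(actual - predicted)) / sum(w)     # one plausible weighted MAE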
weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mape.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mape.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mape.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mape.numeric","text":"metric calculated , $$ \\frac{1}{n} \\sum_i^n \\frac{|y_i - \\upsilon_i|}{|y_i|} $$","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/mape.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mape.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Mean Absolute Percentage Error (MAPE) cat( \"Mean Absolute Percentage Error\", mape( actual = actual, predicted = predicted, ), \"Mean Absolute Percentage Error (weighted)\", weighted.mape( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Mean Absolute Percentage Error #> 0.08776196 #> Mean Absolute Percentage Error (weighted) #> 0.08574846"},{"path":"https://serkor1.github.io/SLmetrics/reference/mcc.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","title":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","text":"mcc()-function computes Matthews Correlation Coefficient (MCC), also known \\(\\phi\\)-coefficient, two vectors predicted observed factor() values. weighted.mcc() function computes weighted Matthews Correlation Coefficient.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mcc.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","text":"","code":"# S3 method for class 'factor' mcc(actual, predicted, ...) # S3 method for class 'factor' weighted.mcc(actual, predicted, w, ...) # S3 method for class 'cmatrix' mcc(x, ...) # S3 method for class 'factor' phi(actual, predicted, ...) # S3 method for class 'factor' weighted.phi(actual, predicted, w, ...) # S3 method for class 'cmatrix' phi(x, ...) mcc(...) weighted.mcc(...) phi(...) weighted.phi(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/mcc.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","text":"actual vector - length \\(n\\), \\(k\\) levels predicted vector -vector length \\(n\\), \\(k\\) levels ... Arguments passed methods w -vector length \\(n\\). 
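Analogously to the MAE check above, the documented MAPE formula can be evaluated directly in base R. Illustrative only, assuming the mtcars `actual` and `predicted` vectors from the mape() example.

mean(abs((actual - predicted) / actual))      # unweighted MAPE, cf. the value above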
NULL default x confusion matrix created cmatrix()","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mcc.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","text":"-vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mcc.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","text":"metric calculated follows, $$ \\frac{\\#TP \\times \\#TN - \\#FP \\times \\#FN}{\\sqrt{(\\#TP + \\#FP)(\\#TP + \\#FN)(\\#TN + \\#FP)(\\#TN + \\#FN)}} $$","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mcc.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/mcc.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{Matthews}\\) \\(\\text{Correlation}\\) \\(\\text{Coefficient}\\) — mcc.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate performance # using Matthews Correlation Coefficient cat( \"Matthews Correlation Coefficient\", mcc( actual = actual, predicted = predicted ), \"Matthews Correlation Coefficient (weighted)\", weighted.mcc( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ), sep = \"\\n\" ) #> Matthews Correlation Coefficient #> 0.562878 #> Matthews Correlation Coefficient (weighted) #> 0.4976298"},{"path":"https://serkor1.github.io/SLmetrics/reference/mpe.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{mean}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mpe.numeric","title":"Compute the \\(\\text{mean}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mpe.numeric","text":"mpe()-function computes mean percentage error observed predicted vectors. 
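For the binary mcc() example above, the documented formula can be evaluated by hand from the 2x2 confusion matrix. This base-R sketch assumes the iris-based `actual` and `predicted` factors from that example.

cm <- table(actual, predicted)           # rows = actual, columns = predicted
TP <- cm["Virginica", "Virginica"]
TN <- cm["Others",    "Others"]
FP <- cm["Others",    "Virginica"]       # predicted Virginica, actually Others
FN <- cm["Virginica", "Others"]          # predicted Others, actually Virginica
(TP * TN - FP * FN) /
  sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))   # should match mcc()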
weighted.mpe() function computes weighted mean percentage error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mpe.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{mean}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mpe.numeric","text":"","code":"# S3 method for class 'numeric' mpe(actual, predicted, ...) # S3 method for class 'numeric' weighted.mpe(actual, predicted, w, ...) mpe(...) weighted.mpe(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/mpe.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{mean}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mpe.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. w -vector length \\(n\\). weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mpe.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{mean}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mpe.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mpe.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{mean}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mpe.numeric","text":"metric calculated , $$ \\frac{1}{n} \\sum_i^n \\frac{y_i - \\upsilon_i}{y_i} $$ \\(y_i\\) \\(\\upsilon_i\\) actual predicted values respectively.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/mpe.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{mean}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — mpe.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Mean Percentage Error (MPE) cat( \"Mean Percentage Error\", mpe( actual = actual, predicted = predicted, ), \"Mean Percentage Error (weighted)\", weighted.mpe( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Mean Percentage Error #> -0.008569118 #> Mean Percentage Error (weighted) #> 1.734723e-18"},{"path":"https://serkor1.github.io/SLmetrics/reference/mse.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — mse.numeric","title":"Compute the \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — mse.numeric","text":"mse()-function computes mean squared error observed predicted vectors. weighted.mse() function computes weighted mean squared error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mse.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — mse.numeric","text":"","code":"# S3 method for class 'numeric' mse(actual, predicted, ...) # S3 method for class 'numeric' weighted.mse(actual, predicted, w, ...) mse(...) 
weighted.mse(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/mse.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — mse.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. w -vector length \\(n\\). weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mse.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — mse.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/mse.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — mse.numeric","text":"metric calculated , $$ \\frac{1}{n} \\sum_i^n (y_i - \\upsilon_i)^2 $$ \\(y_i\\) \\(\\upsilon_i\\) actual predicted values respectively.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/mse.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — mse.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Mean Squared Error (MSE) cat( \"Mean Squared Error\", mse( actual = actual, predicted = predicted, ), \"Mean Squared Error (weighted)\", weighted.mse( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Mean Squared Error #> 4.609201 #> Mean Squared Error (weighted) #> 5.283426"},{"path":"https://serkor1.github.io/SLmetrics/reference/nlr.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","title":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","text":"nlr()-function computes negative likelihood ratio, also known likelihood ratio negative results, two vectors predicted observed factor() values. weighted.nlr() function computes weighted negative likelihood ratio.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/nlr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","text":"","code":"# S3 method for class 'factor' nlr(actual, predicted, ...) # S3 method for class 'factor' weighted.nlr(actual, predicted, w, ...) # S3 method for class 'cmatrix' nlr(x, ...) nlr(...) weighted.nlr(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/nlr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. ... Arguments passed methods w -vector length \\(n\\). NULL default. 
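As with the other regression metrics, the mse() value reported above can be reproduced in one line of base R (a sketch assuming the mtcars `actual` and `predicted` from that example); its square root is the RMSE covered further down.

mean((actual - predicted)^2)        # unweighted MSE, cf. the value above
sqrt(mean((actual - predicted)^2))  # = RMSE, see rmse()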
x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/nlr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/nlr.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{1 - \\text{Sensitivity}_k}{\\text{Specificity}_k} $$ sensitivity (true positive rate) calculated \\(\\frac{\\#TP_k}{\\#TP_k + \\#FN_k}\\) specificity (true negative rate) calculated \\(\\frac{\\#TN_k}{\\#TN_k + \\#FP_k}\\).","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/nlr.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/nlr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{negative}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — nlr.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate model performance # with class-wise negative likelihood ratios cat(\"Negative Likelihood Ratio\", sep = \"\\n\") #> Negative Likelihood Ratio nlr( actual = actual, predicted = predicted ) #> [1] 0.3488372 0.2000000 cat(\"Negative Likelihood Ratio (weighted)\", sep = \"\\n\") #> Negative Likelihood Ratio (weighted) weighted.nlr( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> [1] 0.3579704 0.3127941"},{"path":"https://serkor1.github.io/SLmetrics/reference/npv.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","title":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","text":"npv()-function computes 
negative predictive value, also known True Negative Predictive Value, two vectors predicted observed factor() values. weighted.npv() function computes weighted negative predictive value.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/npv.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","text":"","code":"# S3 method for class 'factor' npv(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.npv(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' npv(x, micro = NULL, na.rm = TRUE, ...) npv(...) weighted.npv(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/npv.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/npv.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/npv.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#TN_k}{\\#TN_k + \\#FN_k} $$ \\(\\#TN_k\\) \\(\\#FN_k\\) number true negatives false negatives, respectively, class \\(k\\).","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/npv.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/npv.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{negative}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — npv.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using Negative Predictive Value # 4.1) unweighted Negative Predictive Value npv( actual = actual, predicted = predicted ) #> Virginica Others #> 0.8514851 0.7142857 # 4.2) weighted Negative Predictive Value weighted.npv( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.7422058 0.7562170 # 5) evaluate overall performance # using micro-averaged Negative Predictive Value cat( \"Micro-averaged Negative Predictive Value\", npv( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged Negative Predictive Value (weighted)\", weighted.npv( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged Negative Predictive Value #> 0.8066667 #> Micro-averaged Negative Predictive Value (weighted) #> 0.7488026"},{"path":"https://serkor1.github.io/SLmetrics/reference/pinball.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{pinball}\\) \\(\\text{loss}\\) — pinball.numeric","title":"Compute the \\(\\text{pinball}\\) \\(\\text{loss}\\) — pinball.numeric","text":"pinball()-function computes pinball loss observed predicted vectors. weighted.pinball() function computes weighted Pinball Loss.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/pinball.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{pinball}\\) \\(\\text{loss}\\) — pinball.numeric","text":"","code":"# S3 method for class 'numeric' pinball(actual, predicted, alpha = 0.5, deviance = FALSE, ...) # S3 method for class 'numeric' weighted.pinball(actual, predicted, w, alpha = 0.5, deviance = FALSE, ...) pinball(...) weighted.pinball(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/pinball.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{pinball}\\) \\(\\text{loss}\\) — pinball.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. 
alpha -value length \\(1\\) (default: \\(0.5\\)). slope pinball loss function. deviance -value length 1 (default: FALSE). TRUE function returns \\(D^2\\) loss. ... Arguments passed methods. w -vector length \\(n\\). weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/pinball.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{pinball}\\) \\(\\text{loss}\\) — pinball.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/pinball.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{pinball}\\) \\(\\text{loss}\\) — pinball.numeric","text":"metric calculated , $$\\text{PinballLoss}_{\\text{unweighted}} = \\frac{1}{n} \\sum_{=1}^{n} \\left[ \\alpha \\cdot \\max(0, y_i - \\hat{y}_i) - (1 - \\alpha) \\cdot \\max(0, \\hat{y}_i - y_i) \\right]$$ \\(y_i\\) actual value, \\(\\hat{y}_i\\) predicted value \\(\\alpha\\) quantile level.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/pinball.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{pinball}\\) \\(\\text{loss}\\) — pinball.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Pinball Loss cat( \"Pinball Loss\", pinball( actual = actual, predicted = predicted, ), \"Pinball Loss (weighted)\", weighted.pinball( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Pinball Loss #> 0.8613701 #> Pinball Loss (weighted) #> 0.9248066"},{"path":"https://serkor1.github.io/SLmetrics/reference/plr.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","title":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","text":"plr()-function computes positive likelihood ratio, also known likelihood ratio positive results, two vectors predicted observed factor() values. weighted.plr() function computes weighted positive likelihood ratio.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/plr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","text":"","code":"# S3 method for class 'factor' plr(actual, predicted, ...) # S3 method for class 'factor' weighted.plr(actual, predicted, w, ...) # S3 method for class 'cmatrix' plr(x, ...) plr(...) weighted.plr(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/plr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. ... Arguments passed methods w -vector length \\(n\\). NULL default. 
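For the pinball() example above, the quantile loss can be written out directly; with alpha = 0.5 it reduces to half the mean absolute error, which is consistent with the value reported there. This is an illustrative sketch using the standard pinball-loss form (both terms entering with a positive sign), assuming the mtcars `actual` and `predicted` vectors.

alpha <- 0.5
loss  <- ifelse(actual >= predicted,
                alpha       * (actual - predicted),
                (1 - alpha) * (predicted - actual))
mean(loss)                      # cf. pinball(actual, predicted); equals MAE/2 here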
x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/plr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/plr.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\text{Sensitivity}_k}{1 - \\text{Specificity}_k} $$ sensitivity (true positive rate) calculated \\(\\frac{\\#TP_k}{\\#TP_k + \\#FN_k}\\) specificity (true negative rate) calculated \\(\\frac{\\#TN_k}{\\#TN_k + \\#FP_k}\\). aggregate = TRUE, micro-average calculated, $$ \\frac{\\sum_{k=1}^k \\text{Sensitivity}_k}{1 - \\sum_{k=1}^k \\text{Specificity}_k} $$","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/plr.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/plr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{positive}\\) \\(\\text{likelihood}\\) \\(\\text{ratio}\\) — plr.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate model performance # with class-wise positive likelihood ratios cat(\"Positive Likelihood Ratio\", sep = \"\\n\") #> Positive Likelihood Ratio plr( actual = actual, predicted = predicted ) #> [1] 5.000000 2.866667 cat(\"Positive Likelihood Ratio (weighted)\", sep = \"\\n\") #> Positive Likelihood Ratio (weighted) weighted.plr( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> [1] 3.196992 2.793527"},{"path":"https://serkor1.github.io/SLmetrics/reference/prROC.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — 
prROC.factor","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — prROC.factor","text":"prROC()-function computes precision() recall() thresholds provided \\(response\\)- \\(thresholds\\)-vector. function constructs data.frame() grouped \\(k\\)-classes class treated binary classification problem.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/prROC.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — prROC.factor","text":"","code":"# S3 method for class 'factor' prROC(actual, response, thresholds = NULL, ...) # S3 method for class 'factor' weighted.prROC(actual, response, w, thresholds = NULL, ...) prROC(...) weighted.prROC(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/prROC.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — prROC.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. response -vector length \\(n\\). estimated response probabilities. thresholds optional -vector non-zero length (default: NULL). ... Arguments passed methods. w -vector length \\(n\\). NULL default.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/prROC.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — prROC.factor","text":"data.frame following form, threshold Thresholds used determine recall() precision() level level actual label levels actual recall recall precision precision","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/prROC.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — prROC.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
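As a manual cross-check of the plr() and nlr() values shown earlier for the Virginica class, both likelihood ratios follow directly from the class-wise sensitivity and specificity. Illustrative base-R sketch, assuming the iris `actual` and `predicted` factors.

cm   <- table(actual, predicted)                                # rows = actual, columns = predicted
sens <- cm["Virginica", "Virginica"] / sum(cm["Virginica", ])   # TP / (TP + FN)
spec <- cm["Others", "Others"]       / sum(cm["Others", ])      # TN / (TN + FP)
sens / (1 - spec)    # positive likelihood ratio, cf. plr()[1]
(1 - sens) / spec    # negative likelihood ratio, cf. nlr()[1]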
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":"https://serkor1.github.io/SLmetrics/reference/prROC.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — prROC.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#TN_k}{\\#TN_k+\\#FP_k} $$ \\(\\#TN_k\\) \\(\\#FP_k\\) number true negatives false positives, respectively, class \\(k\\).","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/prROC.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{reciever}\\) \\(\\text{operator}\\) \\(\\text{characteristics}\\) — prROC.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes response <- predict(model, type = \"response\") # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) generate reciever # operator characteristics roc <- prROC( actual = actual, response = response ) # 5) plot by species plot(roc) # 5.1) summarise summary(roc) #> Reciever Operator Characteristics #> ================================================================================ #> AUC #> - Others: 0.473 #> - Virginica: 0.775 # 6) provide custom # threholds roc <- prROC( actual = actual, response = response, thresholds = seq(0, 1, length.out = 4) ) # 5) plot by species plot(roc)"},{"path":"https://serkor1.github.io/SLmetrics/reference/precision.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","title":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","text":"precision()-function computes precision, also known positive predictive value (PPV), two vectors predicted observed factor() values. weighted.precision() function computes weighted precision.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/precision.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","text":"","code":"# S3 method for class 'factor' precision(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.precision(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' precision(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' ppv(actual, predicted, micro = NULL, na.rm = TRUE, ...) 
# S3 method for class 'factor' weighted.ppv(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' ppv(x, micro = NULL, na.rm = TRUE, ...) precision(...) weighted.precision(...) ppv(...) weighted.ppv(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/precision.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/precision.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/precision.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#TP_k}{\\#TP_k + \\#FP_k} $$ \\(\\#TP_k\\) \\(\\#FP_k\\) number true positives false positives, respectively, class \\(k\\).","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/precision.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
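The class-wise precision formula has a compact base-R equivalent; the following sketch (assuming the iris `actual` and `predicted` factors from the examples) reproduces the unweighted class-wise values and the micro average.

cm <- table(actual, predicted)     # rows = actual, columns = predicted
diag(cm) / colSums(cm)             # class-wise precision: TP_k / (TP_k + FP_k)
sum(diag(cm)) / sum(cm)            # micro average: TP pooled over all predictions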
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/precision.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{precision}\\) or \\(\\text{positive}\\) \\(\\text{predictive}\\) \\(\\text{value}\\) — precision.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using Precision # 4.1) unweighted Precision precision( actual = actual, predicted = predicted ) #> Virginica Others #> 0.7142857 0.8514851 # 4.2) weighted Precision weighted.precision( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.7562170 0.7422058 # 5) evaluate overall performance # using micro-averaged Precision cat( \"Micro-averaged Precision\", precision( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged Precision (weighted)\", weighted.precision( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged Precision #> 0.8066667 #> Micro-averaged Precision (weighted) #> 0.7488026"},{"path":"https://serkor1.github.io/SLmetrics/reference/rae.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{relative}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — rae.numeric","title":"Compute the \\(\\text{relative}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — rae.numeric","text":"rae()-function calculates normalized relative absolute error predicted observed vectors. weighted.rae() function computes weigthed relative absolute error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rae.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{relative}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — rae.numeric","text":"","code":"# S3 method for class 'numeric' rae(actual, predicted, ...) # S3 method for class 'numeric' weighted.rae(actual, predicted, w, ...) rae(...) weighted.rae(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/rae.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{relative}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — rae.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. 
w -vector length \\(n\\). weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rae.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{relative}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — rae.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rae.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{relative}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — rae.numeric","text":"Relative Absolute Error (RAE) calculated : $$ \\text{RAE} = \\frac{\\sum_{=1}^n |y_i - \\upsilon_i|}{\\sum_{=1}^n |y_i - \\bar{y}|} $$ \\(y_i\\) actual values, \\(\\upsilon_i\\) predicted values, \\(\\bar{y}\\) mean actual values.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/rae.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{relative}\\) \\(\\text{absolute}\\) \\(\\text{error}\\) — rae.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Relative Absolute Error (RAE) cat( \"Relative Absolute Error\", rae( actual = actual, predicted = predicted, ), \"Relative Absolute Error (weighted)\", weighted.rae( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Relative Absolute Error #> 0.3654168 #> Relative Absolute Error (weighted) #> 0.363789"},{"path":"https://serkor1.github.io/SLmetrics/reference/recall.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","title":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","text":"recall()-function computes recall, also known sensitivity True Positive Rate (TPR), two vectors predicted observed factor() values. weighted.recall() function computes weighted recall.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/recall.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","text":"","code":"# S3 method for class 'factor' recall(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.recall(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' recall(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' sensitivity(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.sensitivity(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' sensitivity(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' tpr(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.tpr(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' tpr(x, micro = NULL, na.rm = TRUE, ...) recall(...) sensitivity(...) tpr(...) weighted.recall(...) weighted.sensitivity(...) 
weighted.tpr(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/recall.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/recall.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/recall.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#TP_k}{\\#TP_k + \\#FN_k} $$ \\(\\#TP_k\\) \\(\\#FN_k\\) number true positives false negatives, respectively, class \\(k\\).","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/recall.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
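Mirroring the precision sketch above, recall swaps column sums for row sums, and the micro average again pools counts across classes. Illustrative base R, assuming the iris `actual` and `predicted` factors.

cm <- table(actual, predicted)     # rows = actual, columns = predicted
diag(cm) / rowSums(cm)             # class-wise recall: TP_k / (TP_k + FN_k)
sum(diag(cm)) / sum(cm)            # micro average, cf. recall(..., micro = TRUE)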
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/recall.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(recall\\), \\(sensitivity\\) or \\(\\text{true}\\) \\(\\text{positive}\\) \\(\\text{rate}\\) — recall.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using Recall # 4.1) unweighted Recall recall( actual = actual, predicted = predicted ) #> Virginica Others #> 0.70 0.86 # 4.2) weighted Recall weighted.recall( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.7229827 0.7738553 # 5) evaluate overall performance # using micro-averaged Recall cat( \"Micro-averaged Recall\", recall( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged Recall (weighted)\", weighted.recall( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged Recall #> 0.8066667 #> Micro-averaged Recall (weighted) #> 0.7488026"},{"path":"https://serkor1.github.io/SLmetrics/reference/rmse.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rmse.numeric","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rmse.numeric","text":"rmse()-function computes root mean squared error observed predicted vectors. weighted.rmse() function computes weighted root mean squared error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rmse.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rmse.numeric","text":"","code":"# S3 method for class 'numeric' rmse(actual, predicted, ...) # S3 method for class 'numeric' weighted.rmse(actual, predicted, w, ...) rmse(...) weighted.rmse(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/rmse.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rmse.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. 
w -vector length \\(n\\). weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rmse.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rmse.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rmse.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rmse.numeric","text":"metric calculated , $$ \\sqrt{\\frac{1}{n} \\sum_i^n (y_i - \\upsilon_i)^2} $$ \\(y_i\\) \\(\\upsilon_i\\) actual predicted values respectively.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/rmse.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rmse.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Root Mean Squared Error (RMSE) cat( \"Root Mean Squared Error\", rmse( actual = actual, predicted = predicted, ), \"Root Mean Squared Error (weighted)\", weighted.rmse( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Root Mean Squared Error #> 2.146905 #> Root Mean Squared Error (weighted) #> 2.29857"},{"path":"https://serkor1.github.io/SLmetrics/reference/rmsle.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{logarithmic}\\) \\(\\text{error}\\) — rmsle.numeric","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{logarithmic}\\) \\(\\text{error}\\) — rmsle.numeric","text":"rmsle()-function computes root mean squared logarithmic error observed predicted vectors. weighted.rmsle() function computes weighted root mean squared logarithmic error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rmsle.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{logarithmic}\\) \\(\\text{error}\\) — rmsle.numeric","text":"","code":"# S3 method for class 'numeric' rmsle(actual, predicted, ...) # S3 method for class 'numeric' weighted.rmsle(actual, predicted, w, ...) rmsle(...) weighted.rmsle(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/rmsle.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{logarithmic}\\) \\(\\text{error}\\) — rmsle.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. w -vector length \\(n\\). 
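The rmse() value above is simply the square root of the MSE checked earlier; one line of base R suffices (sketch, assuming the mtcars `actual` and `predicted` vectors).

sqrt(mean((actual - predicted)^2))   # unweighted RMSE, cf. the value above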
weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rmsle.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{logarithmic}\\) \\(\\text{error}\\) — rmsle.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rmsle.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{logarithmic}\\) \\(\\text{error}\\) — rmsle.numeric","text":"metric calculated , $$ \\sqrt{\\frac{1}{n} \\sum_i^n (\\log(1 + y_i) - \\log(1 + \\upsilon_i))^2} $$ \\(y_i\\) \\(\\upsilon_i\\) actual predicted values respectively.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/rmsle.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{logarithmic}\\) \\(\\text{error}\\) — rmsle.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Root Mean Squared Logarithmic Error (RMSLE) cat( \"Root Mean Squared Logarithmic Error\", rmsle( actual = actual, predicted = predicted, ), \"Root Mean Squared Logarithmic Error (weighted)\", weighted.rmsle( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Root Mean Squared Logarithmic Error #> 0.1055744 #> Root Mean Squared Logarithmic Error (weighted) #> 0.1025173"},{"path":"https://serkor1.github.io/SLmetrics/reference/rrmse.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{relative}\\) \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrmse.numeric","title":"Compute the \\(\\text{relative}\\) \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrmse.numeric","text":"rrmse()-function computes Relative Root Mean Squared Error observed predicted vectors. weighted.rrmse() function computes weighted Relative Root Mean Squared Error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rrmse.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{relative}\\) \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrmse.numeric","text":"","code":"# S3 method for class 'numeric' rrmse(actual, predicted, normalization = 1L, ...) # S3 method for class 'numeric' weighted.rrmse(actual, predicted, w, normalization = 1L, ...) rrmse(...) weighted.rrmse(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/rrmse.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{relative}\\) \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrmse.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. normalization -value length \\(1\\) (default: \\(1\\)). \\(0\\): mean-normalization, \\(1\\): range-normalization, \\(2\\): IQR-normalization. ... Arguments passed methods. w -vector length \\(n\\). 
weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rrmse.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{relative}\\) \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrmse.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rrmse.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{relative}\\) \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrmse.numeric","text":"metric calculated , $$ \\frac{RMSE}{\\gamma} $$ \\(\\gamma\\) normalization factor.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/rrmse.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{relative}\\) \\(\\text{root}\\) \\(\\text{mean}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrmse.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Relative Root Mean Squared Error (RRMSE) cat( \"IQR Relative Root Mean Squared Error\", rrmse( actual = actual, predicted = predicted, normalization = 2 ), \"IQR Relative Root Mean Squared Error (weighted)\", weighted.rrmse( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg), normalization = 2 ), sep = \"\\n\" ) #> IQR Relative Root Mean Squared Error #> 0.2911058 #> IQR Relative Root Mean Squared Error (weighted) #> 0.2642035"},{"path":"https://serkor1.github.io/SLmetrics/reference/rrse.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{root}\\) \\(\\text{relative}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrse.numeric","title":"Compute the \\(\\text{root}\\) \\(\\text{relative}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrse.numeric","text":"rrse()-function calculates root relative squared error predicted observed vectors. weighted.rrse() function computes weighed root relative squared errorr.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rrse.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{root}\\) \\(\\text{relative}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrse.numeric","text":"","code":"# S3 method for class 'numeric' rrse(actual, predicted, ...) # S3 method for class 'numeric' weighted.rrse(actual, predicted, w, ...) rrse(...) weighted.rrse(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/rrse.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{root}\\) \\(\\text{relative}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrse.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. w -vector length \\(n\\). 
weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rrse.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{root}\\) \\(\\text{relative}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrse.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rrse.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{root}\\) \\(\\text{relative}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrse.numeric","text":"metric calculated , $$ \\text{RRSE} = \\sqrt{\\frac{\\sum_{=1}^n (y_i - \\upsilon_i)^2}{\\sum_{=1}^n (y_i - \\bar{y})^2}} $$ \\(y_i\\) actual values, \\(\\upsilon_i\\) predicted values, \\(\\bar{y}\\) mean actual values.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/rrse.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{root}\\) \\(\\text{relative}\\) \\(\\text{squared}\\) \\(\\text{error}\\) — rrse.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Relative Root Squared Errror (RRSE) cat( \"Relative Root Squared Errror\", rrse( actual = actual, predicted = predicted, ), \"Relative Root Squared Errror (weighted)\", weighted.rrse( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Relative Root Squared Errror #> 0.3619174 #> Relative Root Squared Errror (weighted) #> 0.3691304"},{"path":"https://serkor1.github.io/SLmetrics/reference/rsq.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(R^2\\) — rsq.numeric","title":"Compute the \\(R^2\\) — rsq.numeric","text":"rsq()-function calculates \\(R^2\\), coefficient determination, ovserved predicted vectors. default rsq() returns unadjusted \\(R^2\\). adjusted \\(R^2\\) set \\(k = \\kappa - 1\\), \\(\\kappa\\) number parameters.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rsq.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(R^2\\) — rsq.numeric","text":"","code":"# S3 method for class 'numeric' rsq(actual, predicted, k = 0, ...) # S3 method for class 'numeric' weighted.rsq(actual, predicted, w, k = 0, ...) rsq(...) weighted.rsq(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/rsq.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(R^2\\) — rsq.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. k -vector length 1 (default: 0). \\(k>0\\) function returns adjusted \\(R^2\\). ... Arguments passed methods. w -vector length \\(n\\). 
weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rsq.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(R^2\\) — rsq.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/rsq.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(R^2\\) — rsq.numeric","text":"metric calculated follows, $$ R^2 = 1 - \\frac{\\text{SSE}}{\\text{SST}} \\frac{n-1}{n - (k + 1)} $$ \\(\\text{SSE}\\) sum squared errors, \\(\\text{SST}\\) total sum squared errors, \\(n\\) number observations, \\(k\\) number non-constant parameters.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/rsq.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(R^2\\) — rsq.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure in-sample performance actual <- mtcars$mpg predicted <- fitted(model) # 2) calculate performance # using R squared adjusted and # unadjused for features cat( \"Rsq\", rsq( actual = actual, predicted = fitted(model) ), \"Rsq (Adjusted)\", rsq( actual = actual, predicted = fitted(model), k = ncol(model.matrix(model)) - 1 ), sep = \"\\n\" ) #> Rsq #> 0.8690158 #> Rsq (Adjusted) #> 0.8066423"},{"path":"https://serkor1.github.io/SLmetrics/reference/setNumberThreads.html","id":null,"dir":"Reference","previous_headings":"","what":"Set the Number of Threads for Parallel Computations — setNumberThreads","title":"Set the Number of Threads for Parallel Computations — setNumberThreads","text":"function sets number threads used parallel computations. set -1, available threads utilized.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/setNumberThreads.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Set the Number of Threads for Parallel Computations — setNumberThreads","text":"","code":"setNumberThreads(value = -1L)"},{"path":"https://serkor1.github.io/SLmetrics/reference/setNumberThreads.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Set the Number of Threads for Parallel Computations — setNumberThreads","text":"value specifying number threads use (Default: -1). 
Default -1, uses available threads.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/setNumberThreads.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Set the Number of Threads for Parallel Computations — setNumberThreads","text":"","code":"if (FALSE) { # \\dontrun{ setNumberThreads(4) } # }"},{"path":"https://serkor1.github.io/SLmetrics/reference/setUseOpenMP.html","id":null,"dir":"Reference","previous_headings":"","what":"Enable or Disable OpenMP Parallelization — setUseOpenMP","title":"Enable or Disable OpenMP Parallelization — setUseOpenMP","text":"function allows enable disable use OpenMP parallelizing computations.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/setUseOpenMP.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Enable or Disable OpenMP Parallelization — setUseOpenMP","text":"","code":"setUseOpenMP(value = FALSE)"},{"path":"https://serkor1.github.io/SLmetrics/reference/setUseOpenMP.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Enable or Disable OpenMP Parallelization — setUseOpenMP","text":"value value length 1 (Default: FALSE). length, OpenMP used parallelize computations.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/setUseOpenMP.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Enable or Disable OpenMP Parallelization — setUseOpenMP","text":"","code":"if (FALSE) { # \\dontrun{ setUseOpenMP(TRUE) } # }"},{"path":"https://serkor1.github.io/SLmetrics/reference/smape.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{symmetric}\\) \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — smape.numeric","title":"Compute the \\(\\text{symmetric}\\) \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — smape.numeric","text":"smape()-function computes symmetric mean absolute percentage error observed predicted vectors. weighted.smape() function computes weighted symmetric mean absolute percentage error.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/smape.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{symmetric}\\) \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — smape.numeric","text":"","code":"# S3 method for class 'numeric' smape(actual, predicted, ...) # S3 method for class 'numeric' weighted.smape(actual, predicted, w, ...) smape(...) weighted.smape(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/smape.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{symmetric}\\) \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — smape.numeric","text":"actual -vector length \\(n\\). observed (continuous) response variable. predicted -vector length \\(n\\). estimated (continuous) response variable. ... Arguments passed methods. w -vector length \\(n\\). 
weight assigned observation data.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/smape.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{symmetric}\\) \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — smape.numeric","text":" vector length 1.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/smape.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{symmetric}\\) \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — smape.numeric","text":"metric calculated follows, $$ \\sum_i^n \\frac{1}{n} \\frac{|y_i - \\upsilon_i|}{\\frac{|y_i|+|\\upsilon_i|}{2}} $$ \\(y_i\\) \\(\\upsilon_i\\) actual predicted values respectively.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/smape.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{symmetric}\\) \\(\\text{mean}\\) \\(\\text{absolute}\\) \\(\\text{percentage}\\) \\(\\text{error}\\) — smape.numeric","text":"","code":"# 1) fit a linear # regression model <- lm( mpg ~ ., data = mtcars ) # 1.1) define actual # and predicted values # to measure performance actual <- mtcars$mpg predicted <- fitted(model) # 2) evaluate in-sample model # performance using Symmetric Mean Absolute Percentage Error (MAPE) cat( \"Symmetric Mean Absolute Percentage Error\", mape( actual = actual, predicted = predicted, ), \"Symmetric Mean Absolute Percentage Error (weighted)\", weighted.mape( actual = actual, predicted = predicted, w = mtcars$mpg/mean(mtcars$mpg) ), sep = \"\\n\" ) #> Symmetric Mean Absolute Percentage Error #> 0.08776196 #> Symmetric Mean Absolute Percentage Error (weighted) #> 0.08574846"},{"path":"https://serkor1.github.io/SLmetrics/reference/specificity.html","id":null,"dir":"Reference","previous_headings":"","what":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","title":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","text":" specificity()-function computes specificity, also known True Negative Rate (TNR) selectivity, two vectors predicted observed factor() values. weighted.specificity() function computes weighted specificity.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/specificity.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","text":"","code":"# S3 method for class 'factor' specificity(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.specificity(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' specificity(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' tnr(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.tnr(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' tnr(x, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' selectivity(actual, predicted, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'factor' weighted.selectivity(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) # S3 method for class 'cmatrix' selectivity(x, micro = NULL, na.rm = TRUE, ...) specificity(...) tnr(...) 
selectivity(...) weighted.specificity(...) weighted.tnr(...) weighted.selectivity(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/specificity.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","text":"actual vector - length \\(n\\), \\(k\\) levels. predicted vector -vector length \\(n\\), \\(k\\) levels. micro -value length \\(1\\) (default: NULL). TRUE returns micro average across \\(k\\) classes, FALSE returns macro average. na.rm value length \\(1\\) (default: TRUE). TRUE, NA values removed computation. argument relevant micro != NULL. na.rm = TRUE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA))). na.rm = FALSE, computation corresponds sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA)). ... Arguments passed methods w -vector length \\(n\\). NULL default. x confusion matrix created cmatrix().","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/specificity.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","text":"micro NULL (default), named -vector length k micro TRUE FALSE, -vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/specificity.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":"https://serkor1.github.io/SLmetrics/reference/specificity.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","text":"metric calculated class \\(k\\) follows, $$ \\frac{\\#TN_k}{\\#TN_k+\\#FP_k} $$ \\(\\#TN_k\\) \\(\\#FP_k\\) number true negatives false positives, respectively, class \\(k\\).","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/specificity.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Specificity or \\(\\text{true}\\) \\(\\text{negative}\\) \\(\\text{rate}\\) — specificity.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate class-wise performance # using Specificity # 4.1) unweighted Specificity specificity( actual = actual, predicted = predicted ) #> Virginica Others #> 0.86 0.70 # 4.2) weighted Specificity weighted.specificity( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ) #> Virginica Others #> 0.7738553 0.7229827 # 5) evaluate overall performance # using micro-averaged Specificity cat( \"Micro-averaged Specificity\", specificity( actual = actual, predicted = predicted, micro = TRUE ), \"Micro-averaged Specificity (weighted)\", weighted.specificity( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length), micro = TRUE ), sep = \"\\n\" ) #> Micro-averaged Specificity #> 0.8066667 #> Micro-averaged Specificity (weighted) #> 0.7488026"},{"path":"https://serkor1.github.io/SLmetrics/reference/zerooneloss.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","title":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","text":"zerooneloss()-function computes zero-one Loss, classification loss function calculates proportion misclassified instances two vectors predicted observed factor() values. weighted.zerooneloss() function computes weighted zero-one loss.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/zerooneloss.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","text":"","code":"# S3 method for class 'factor' zerooneloss(actual, predicted, ...) 
# S3 method for class 'factor' weighted.zerooneloss(actual, predicted, w, ...) # S3 method for class 'cmatrix' zerooneloss(x, ...) zerooneloss(...) weighted.zerooneloss(...)"},{"path":"https://serkor1.github.io/SLmetrics/reference/zerooneloss.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","text":"actual vector - length \\(n\\), \\(k\\) levels predicted vector -vector length \\(n\\), \\(k\\) levels ... Arguments passed methods w -vector length \\(n\\). NULL default x confusion matrix created cmatrix()","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/zerooneloss.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","text":"-vector length 1","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/zerooneloss.html","id":"calculation","dir":"Reference","previous_headings":"","what":"Calculation","title":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","text":"metric calculated follows, $$ \\frac{\\#FP + \\#FN}{\\#TP + \\#TN + \\#FP + \\#FN} $$ \\(\\#TP\\), \\(\\#TN\\), \\(\\#FP\\), \\(\\#FN\\) represent true positives, true negatives, false positives, false negatives, respectively.","code":""},{"path":"https://serkor1.github.io/SLmetrics/reference/zerooneloss.html","id":"creating-lt-factor-gt-","dir":"Reference","previous_headings":"","what":"Creating ","title":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","text":"Consider classification problem three classes: , B, C. actual vector factor() values defined follows: , values 1, 2, 3 mapped , B, C, respectively. Now, suppose model predict B's. 
predicted vector factor() values defined follows: cases, \\(k = 3\\), determined indirectly levels argument.","code":"## set seed set.seed(1903) ## actual factor( x = sample(x = 1:3, size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] B A B B A C B C C A #> Levels: A B C ## set seed set.seed(1903) ## predicted factor( x = sample(x = c(1, 3), size = 10, replace = TRUE), levels = c(1, 2, 3), labels = c(\"A\", \"B\", \"C\") ) #> [1] C A C C C C C C A C #> Levels: A B C"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/reference/zerooneloss.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute the \\(\\text{Zero}\\)-\\(\\text{One}\\) \\(\\text{Loss}\\) — zerooneloss.factor","text":"","code":"# 1) recode Iris # to binary classification # problem iris$species_num <- as.numeric( iris$Species == \"virginica\" ) # 2) fit the logistic # regression model <- glm( formula = species_num ~ Sepal.Length + Sepal.Width, data = iris, family = binomial( link = \"logit\" ) ) # 3) generate predicted # classes predicted <- factor( as.numeric( predict(model, type = \"response\") > 0.5 ), levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 3.1) generate actual # classes actual <- factor( x = iris$species_num, levels = c(1,0), labels = c(\"Virginica\", \"Others\") ) # 4) evaluate model # performance using Zero-One Loss cat( \"Zero-One Loss\", zerooneloss( actual = actual, predicted = predicted ), \"Zero-One Loss (weigthed)\", weighted.zerooneloss( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) ), sep = \"\\n\" ) #> Zero-One Loss #> 0.1933333 #> Zero-One Loss (weigthed) #> 0.2511974"},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"version-03-1","dir":"Changelog","previous_headings":"","what":"Version 0.3-1","title":"Version 0.3-1","text":"Version 0.3-1 considered pre-release {SLmetrics}. expect breaking changes, unless major bug/issue reported nature forces breaking changes.","code":""},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"rocket-improvements-0-3-1","dir":"Changelog","previous_headings":"","what":"🚀 Improvements","title":"Version 0.3-1","text":"OpenMP Support (PR https://github.com/serkor1/SLmetrics/pull/40): {SLmetrics} now supports parallelization OpenMP. 
OpenMP can utilized follows:","code":"# 1) probability distribution # generator rand.sum <- function(n){ x <- sort(runif(n-1)) c(x,1) - c(0,x) } # 2) generate probability # matrix set.seed(1903) pk <- t(replicate(100,rand.sum(1e3))) # 3) Enable OpenMP SLmetrics::setUseOpenMP(TRUE) #> OpenMP usage set to: enabled system.time(SLmetrics::entropy(pk)) #> user system elapsed #> 0.280 0.001 0.012 # 3) Disable OpenMP SLmetrics::setUseOpenMP(FALSE) #> OpenMP usage set to: disabled system.time(SLmetrics::entropy(pk)) #> user system elapsed #> 0.001 0.000 0.001"},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"bug-bug-fixes-0-3-1","dir":"Changelog","previous_headings":"","what":"🐛 Bug-fixes","title":"Version 0.3-1","text":"Plot-method ROC prROC (https://github.com/serkor1/SLmetrics/issues/36): Fixed bug plot.ROC() plot.prROC() panels = FALSE additional lines added plot.","code":""},{"path":[]},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"new-feature-0-3-0","dir":"Changelog","previous_headings":"","what":"New Feature","title":"Version 0.3-0","text":"Relative Root Mean Squared Error: function normalizes Root Mean Squared Error facttor. official way normalizing - {SLmetrics} RMSE can normalized using three options; mean-, range- IQR-normalization. can used follows, Log Loss: Weighted unweighted Log Loss, without normalization. function can used follows, Weighted Receiver Operator Characteristics: weighted.ROC(), function calculates weighted True Positive False Positive Rates threshold. Weighted Precision-Recall Curve: weighted.prROC(), function calculates weighted Recall Precsion threshold.","code":"# 1) define values actual <- rnorm(1e3) predicted <- actual + rnorm(1e3) # 2) calculate Relative Root Mean Squared Error cat( \"Mean Relative Root Mean Squared Error\", SLmetrics::rrmse( actual = actual, predicted = predicted, normalization = 0 ), \"Range Relative Root Mean Squared Error\", SLmetrics::rrmse( actual = actual, predicted = predicted, normalization = 1 ), \"IQR Relative Root Mean Squared Error\", SLmetrics::rrmse( actual = actual, predicted = predicted, normalization = 2 ), sep = \"\\n\" ) #> Mean Relative Root Mean Squared Error #> 40.74819 #> Range Relative Root Mean Squared Error #> 0.1556036 #> IQR Relative Root Mean Squared Error #> 0.738214 # Create factors and response probabilities actual <- factor(c(\"Class A\", \"Class B\", \"Class A\")) weights <- c(0.3,0.9,1) response <- matrix(cbind( 0.2, 0.8, 0.8, 0.2, 0.7, 0.3 ),nrow = 3, ncol = 2) cat( \"Unweighted Log Loss:\", SLmetrics::logloss( actual, response ), \"Weighted log Loss:\", SLmetrics::weighted.logloss( actual = actual, response = response, w = weights ), sep = \"\\n\" ) #> Unweighted Log Loss: #> 0.7297521 #> Weighted log Loss: #> 0.4668102"},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"breaking-changes-0-3-0","dir":"Changelog","previous_headings":"","what":"Breaking Changes","title":"Version 0.3-0","text":"Weighted Confusion Matix: w-argument cmatrix() removed favor verbose weighted confusion matrix call weighted.cmatrix()-function. See , Prior version 0.3-0 weighted confusion matrix part cmatrix()-function called follows, solution, although simple, inconsistent remaining implementation weighted metrics {SLmetrics}. 
regain consistency simplicity weighted confusion matrix now retrieved follows,","code":"SLmetrics::cmatrix( actual = actual, predicted = predicted, w = weights ) # 1) define factors actual <- factor(sample(letters[1:3], 100, replace = TRUE)) predicted <- factor(sample(letters[1:3], 100, replace = TRUE)) weights <- runif(length(actual)) # 2) without weights SLmetrics::cmatrix( actual = actual, predicted = predicted ) #> a b c #> a 12 10 15 #> b 10 15 8 #> c 5 14 11 # 2) with weights SLmetrics::weighted.cmatrix( actual = actual, predicted = predicted, w = weights ) #> a b c #> a 3.846279 5.399945 7.226539 #> b 4.988230 7.617554 4.784221 #> c 2.959719 5.045980 4.725642"},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"bug-bug-fixes-0-3-0","dir":"Changelog","previous_headings":"","what":"🐛 Bug-fixes","title":"Version 0.3-0","text":"Return named vectors: classification metrics micro == NULL returning named vectors. fixed.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"improvements-0-2-0","dir":"Changelog","previous_headings":"","what":"Improvements","title":"Version 0.2-0","text":"documentation: documentation gotten extra love, now functions formulas embedded, details section freed general description [factor] creation. make room future expansions various functions details required. weighted classification metrics: cmatrix()-function now accepts argument w sample weights; passed respective method return weighted metric. example using sample weights confusion matrix, Calculating weighted metrics manually using foo.cmatrix()-method, Please note, however, possible pass cmatix()-weighted.accurracy(), Unit-testing: functions now tested edge-cases balanced imbalanced classifcation problems, regression problems, individually. enable robust development process prevent avoidable bugs.","code":"# 1) define factors actual <- factor(sample(letters[1:3], 100, replace = TRUE)) predicted <- factor(sample(letters[1:3], 100, replace = TRUE)) weights <- runif(length(actual)) # 2) without weights SLmetrics::cmatrix( actual = actual, predicted = predicted ) #> a b c #> a 14 9 14 #> b 12 15 10 #> c 6 9 11 # 2) with weights SLmetrics::weighted.cmatrix( actual = actual, predicted = predicted, w = weights ) #> a b c #> a 6.197341 4.717194 6.122321 #> b 6.244226 7.511618 5.114025 #> c 2.417569 5.487810 5.760531 # 1) weigthed confusion matrix # and weighted accuray confusion_matrix <- SLmetrics::cmatrix( actual = actual, predicted = predicted, w = weights ) # 2) pass into accuracy # function SLmetrics::accuracy( confusion_matrix ) #> [1] 0.4 # 3) calculate the weighted # accuracy manually SLmetrics::weighted.accuracy( actual = actual, predicted = predicted, w = weights ) #> [1] 0.3927467 try( SLmetrics::weighted.accuracy( confusion_matrix ) ) #> Error in UseMethod(generic = \"weighted.accuracy\", object = ..1) : #> no applicable method for 'weighted.accuracy' applied to an object of class \"cmatrix\""},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"bug-bug-fixes-0-2-0","dir":"Changelog","previous_headings":"","what":"🐛 Bug-fixes","title":"Version 0.2-0","text":"Floating precision: Metrics give different results based method used. means foo.cmatrix() foo.factor() produce different results (See Issue https://github.com/serkor1/SLmetrics/issues/16). fixed using higher precision Rcpp::NumericMatrix instead Rcpp::IntegerMatrix. Miscalculation Confusion Matrix elements: error FN, TN, FP TP calculated fixed. issue raised bug. 
something caught unit-tests, total samples high spot error. , however, fixed now. means metrics uses explicitly now stable, produces desired output. Calculation Error Fowlks Mallows Index: bug calculation fmi()-function fixed. fmi()-function now correctly calculates measure. Calculation Error Pinball Deviance Concordance Correlation Coefficient: See issue https://github.com/serkor1/SLmetrics/issues/19. Switched unbiased variance calculation ccc()-function. pinball()-function missing weighted quantile function. issue now fixed. Calculation Error Balanced Accuracy: See issue https://github.com/serkor1/SLmetrics/issues/24. function now correctly adjusts random chance, result matches {scikit-learn} Calculation Error F-beta Score: See issue https://github.com/serkor1/SLmetrics/issues/23. function werent respecting na.rm micro, fixed accordingly. Calculation Error Relative Absolute Error: function incorrectly calculating means, instead sums. fixed.","code":""},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"breaking-changes-0-2-0","dir":"Changelog","previous_headings":"","what":"Breaking changes","title":"Version 0.2-0","text":"regression metrics na.rm- w-arguments removed. weighted regression metrics seperate function weighted.foo() increase consistency across metrics. See example , rrmse()-function removed favor rrse()-function. function incorrectly specified described package.","code":"# 1) define regression problem actual <- rnorm(n = 1e3) predicted <- actual + rnorm(n = 1e3) w <- runif(n = 1e3) # 2) unweighted metrics SLmetrics::rmse(actual, predicted) #> [1] 0.9989386 # 3) weighted metrics SLmetrics::weighted.rmse(actual, predicted, w = w) #> [1] 1.013139"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"general-0-1-1","dir":"Changelog","previous_headings":"","what":"General","title":"Version 0.1-1","text":"Backend changes: pair-wise metrics arer moved {Rcpp} C++, reduced execution time half. pair-wise metrics now faster.","code":""},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"improvements-0-1-1","dir":"Changelog","previous_headings":"","what":"Improvements","title":"Version 0.1-1","text":"NA-controls: pair-wise metrics doesn’t micro-argument handling missing values according C++ {Rcpp} internals. See Issue. Thank @EmilHvitfeldt pointing . now fixed functions uses na.rm-argument explicitly control . See ,","code":"# 1) define factors actual <- factor(c(\"no\", \"yes\")) predicted <- factor(c(NA, \"no\")) # 2) accuracy with na.rm = TRUE SLmetrics::accuracy( actual = actual, predicted = predicted, na.rm = TRUE ) # 2) accuracy with na.rm = FALSE SLmetrics::accuracy( actual = actual, predicted = predicted, na.rm = FALSE )"},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"bug-bug-fixes-0-1-1","dir":"Changelog","previous_headings":"","what":"🐛 Bug-fixes","title":"Version 0.1-1","text":"plot.prROC()- plot.ROC()-functions now adds line plot panels = FALSE. 
See Issue https://github.com/serkor1/SLmetrics/issues/9.","code":"# 1) define actual # classes actual <- factor( sample(letters[1:2], size = 100, replace = TRUE) ) # 2) define response # probabilities response <- runif(100) # 3) calculate # ROC and prROC # 3.1) ROC roc <- SLmetrics::ROC( actual, response ) # 3.2) prROC prroc <- SLmetrics::prROC( actual, response ) # 4) plot with panels # FALSE par(mfrow = c(1,2)) plot( roc, panels = FALSE ) plot( prroc, panels = FALSE )"},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"general-0-1-0","dir":"Changelog","previous_headings":"","what":"General","title":"Version 0.1-0","text":"{SLmetrics} collection Machine Learning performance evaluation functions supervised learning. Visit online documentation GitHub Pages.","code":""},{"path":[]},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"supervised-classification-metrics-0-1-0","dir":"Changelog","previous_headings":"Examples","what":"Supervised classification metrics","title":"Version 0.1-0","text":"","code":"# 1) actual classes print( actual <- factor( sample(letters[1:3], size = 10, replace = TRUE) ) ) #> [1] a b a c b a a a c b #> Levels: a b c # 2) predicted classes print( predicted <- factor( sample(letters[1:3], size = 10, replace = TRUE) ) ) #> [1] b a c c c c c c a a #> Levels: a b c # 1) calculate confusion # matrix and summarise # it summary( confusion_matrix <- SLmetrics::cmatrix( actual = actual, predicted = predicted ) ) #> Confusion Matrix (3 x 3) #> ================================================================================ #> a b c #> a 0 1 4 #> b 2 0 1 #> c 1 0 1 #> ================================================================================ #> Overall Statistics (micro average) #> - Accuracy: 0.10 #> - Balanced Accuracy: 0.17 #> - Sensitivity: 0.10 #> - Specificity: 0.55 #> - Precision: 0.10 # 2) calculate false positive # rate using micro average SLmetrics::fpr( confusion_matrix ) #> a b c #> 0.6000000 0.1428571 0.6250000"},{"path":"https://serkor1.github.io/SLmetrics/news/index.html","id":"supervised-regression-metrics-0-1-0","dir":"Changelog","previous_headings":"Examples","what":"Supervised regression metrics","title":"Version 0.1-0","text":"","code":"# 1) actual values actual <- rnorm(n = 100) # 2) predicted values predicted <- actual + rnorm(n = 100) # 1) calculate # huber loss SLmetrics::huberloss( actual = actual, predicted = predicted ) #> [1] 0.4389594"}]
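The 0.3-1 changelog entry indexed above adds two controls for OpenMP parallelization, setUseOpenMP() and setNumberThreads(). As a minimal sketch of how the two controls combine, reusing the probability-matrix generator from that entry (the thread count of 2 is an arbitrary illustration, and the timings will vary by machine):

# 1) probability distribution generator
# (same helper as in the 0.3-1 changelog entry)
rand.sum <- function(n) {
  x <- sort(runif(n - 1))
  c(x, 1) - c(0, x)
}

# 2) generate a 100 x 1000 probability matrix
set.seed(1903)
pk <- t(replicate(100, rand.sum(1e3)))

# 3) enable OpenMP and cap the thread count
# (the default of -1 uses all available threads)
SLmetrics::setUseOpenMP(TRUE)
SLmetrics::setNumberThreads(2)
system.time(SLmetrics::entropy(pk))

# 4) disable OpenMP again; the computation runs single-threaded
SLmetrics::setUseOpenMP(FALSE)
system.time(SLmetrics::entropy(pk))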