{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"torchfix","owner":"pytorch-labs","isFork":false,"description":"TorchFix - a linter for PyTorch-using code with autofix support","allTopics":["python","static-code-analysis","linter","static-analysis","pytorch","flake8","hacktoberfest","flake8-plugin"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":10,"starsCount":77,"forksCount":16,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-20T22:42:28.103Z"}},{"type":"Public","name":"LeanRL","owner":"pytorch-labs","isFork":false,"description":"LeanRL is a fork of CleanRL, where selected PyTorch scripts optimized for performance using compile and cudagraphs.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":1,"starsCount":280,"forksCount":8,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-20T15:41:34.174Z"}},{"type":"Public","name":"gpt-fast","owner":"pytorch-labs","isFork":false,"description":"Simple and efficient pytorch-native transformer text generation in <1000 LOC of python.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":37,"issueCount":61,"starsCount":5536,"forksCount":503,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,0,1,0,0,0,0,0,3,4,4,2,0,1,0,0,0,2,7,0,2,14,12,10,6,4,5,2,1,1,4,3,1,1,0,0,3,2,2,0,0,0,0,0,0,0,2,1,1,1,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-20T03:37:23.115Z"}},{"type":"Public","name":"segment-anything-fast","owner":"pytorch-labs","isFork":false,"description":"A batched offline inference oriented version of segment-anything","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":5,"issueCount":27,"starsCount":1186,"forksCount":69,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-13T22:18:13.562Z"}},{"type":"Public","name":"FACTO","owner":"pytorch-labs","isFork":false,"description":"Framework for Algorithmic Correctness Testing of Operators","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":1,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,14,0,0,0,11,0,1,1,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,2,3,0,1,0,0,0,0,2,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-27T22:00:02.873Z"}},{"type":"Public archive","name":"superblock","owner":"pytorch-labs","isFork":false,"description":"A block oriented training approach for inference time optimization.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":26,"forksCount":3,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,1,0,1,0,3,8,9,3,9,1,0,1,0,0,25,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,1,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-19T01:54:44.015Z"}},{"type":"Public","name":"applied-ai","owner":"pytorch-labs","isFork":false,"description":"Applied AI experiments and examples for PyTorch","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":3,"issueCount":8,"starsCount":128,"forksCount":11,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-17T22:03:24.662Z"}},{"type":"Public","name":"attention-gym","owner":"pytorch-labs","isFork":false,"description":"Helpful tools and examples for working with flex-attention","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":24,"starsCount":358,"forksCount":14,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,12,13,4,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-17T00:33:18.867Z"}},{"type":"Public archive","name":"float8_experimental","owner":"pytorch-labs","isFork":false,"description":"This repository contains the experimental PyTorch native float8 training UX","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":22,"issueCount":0,"starsCount":212,"forksCount":20,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-01T02:42:50.356Z"}},{"type":"Public","name":"triton-cpu","owner":"pytorch-labs","isFork":false,"description":"An experimental CPU backend for Triton (https://github.com/openai/triton)","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":31,"forksCount":3,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-13T13:35:06.140Z"}}],"repositoryCount":10,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"pytorch-labs repositories"}