{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"docs","owner":"midjourney","isFork":false,"description":"(deprecated) Source for Midjourney's official wiki","allTopics":[],"primaryLanguage":{"name":"CSS","color":"#563d7c"},"pullRequestCount":0,"issueCount":2,"starsCount":387,"forksCount":71,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-22T00:30:55.544Z"}},{"type":"Public","name":"flash-attention-jax","owner":"midjourney","isFork":true,"description":"Implementation of Flash Attention in Jax","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":7,"forksCount":23,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-17T04:26:48.426Z"}},{"type":"Public","name":"nanobind","owner":"midjourney","isFork":true,"description":"nanobind: tiny and efficient C++/Python bindings","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":190,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-15T15:12:39.059Z"}},{"type":"Public","name":"hf-transformers","owner":"midjourney","isFork":true,"description":"🤗 Transformers: State-of-the-art Machine Learning for Pytorch, TensorFlow, and JAX.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":3,"forksCount":26423,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-14T06:39:26.242Z"}},{"type":"Public","name":"TransformerEngine","owner":"midjourney","isFork":true,"description":"A library for accelerating Transformer models on NVIDIA GPUs, including using 8-bit floating point (FP8) precision on Hopper and Ada GPUs, to provide better performance with lower memory utilization in both training and inference.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":306,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-11T12:54:30.456Z"}},{"type":"Public","name":"transformers","owner":"midjourney","isFork":true,"description":"🤗 Transformers: State-of-the-art Machine Learning for Pytorch, TensorFlow, and JAX.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":55,"forksCount":26423,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-01T07:48:23.452Z"}},{"type":"Public archive","name":"equinox","owner":"midjourney","isFork":true,"description":"Elegant easy-to-use neural networks in JAX. 
https://docs.kidger.site/equinox/","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":136,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-01T21:08:23.162Z"}},{"type":"Public","name":"einops","owner":"midjourney","isFork":true,"description":"Deep learning operations reinvented (for pytorch, tensorflow, jax and others)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":6,"forksCount":348,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-12-29T06:10:39.650Z"}},{"type":"Public","name":"flash-attention","owner":"midjourney","isFork":true,"description":"Fast and memory-efficient exact attention","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":3,"forksCount":1243,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-12-17T19:22:06.694Z"}},{"type":"Public","name":"jax","owner":"midjourney","isFork":true,"description":"Composable transformations of Python+NumPy programs: differentiate, vectorize, JIT to GPU/TPU, and more","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":13,"forksCount":2752,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-10-12T04:45:46.016Z"}},{"type":"Public","name":"flax","owner":"midjourney","isFork":true,"description":"Flax is a neural network library for JAX that is designed for flexibility.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":9,"forksCount":631,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-09-17T19:46:20.965Z"}}],"repositoryCount":11,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"midjourney repositories"}
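
The `einops` description ("deep learning operations reinvented") is easiest to see in code. A minimal sketch using upstream einops' public `rearrange`/`reduce` API; the fork is assumed to track upstream behavior:

```python
# Minimal sketch of the tensor-rearrangement style einops provides,
# using upstream einops' public API (rearrange, reduce).
import numpy as np
from einops import rearrange, reduce

images = np.random.rand(8, 32, 32, 3)  # batch, height, width, channels

# NHWC -> NCHW, spelled as a readable axis pattern instead of transpose indices
chw = rearrange(images, "b h w c -> b c h w")

# Global average pooling over the spatial axes
pooled = reduce(images, "b h w c -> b c", "mean")

print(chw.shape, pooled.shape)  # (8, 3, 32, 32) (8, 3)
```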
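
Likewise, the `jax` fork's description names the library's three signature composable transformations: differentiate (`jax.grad`), vectorize (`jax.vmap`), and JIT to GPU/TPU (`jax.jit`). A minimal sketch against upstream JAX's public API, illustrating the upstream project rather than anything specific to the midjourney fork:

```python
# Composable JAX transformations: grad (differentiate), vmap (vectorize),
# and jit (compile with XLA for CPU/GPU/TPU).
import jax
import jax.numpy as jnp

def loss(w, x):
    # Toy scalar function: squared norm of a linear map.
    return jnp.sum((x @ w) ** 2)

# Differentiate: gradient of `loss` with respect to its first argument, w.
grad_loss = jax.grad(loss)

# Vectorize: map the gradient over a batch of inputs x without a Python loop.
batched_grad = jax.vmap(grad_loss, in_axes=(None, 0))

# JIT-compile the whole pipeline.
fast_batched_grad = jax.jit(batched_grad)

w = jnp.ones((3, 2))
xs = jnp.arange(12.0).reshape(4, 3)    # batch of 4 inputs
print(fast_batched_grad(w, xs).shape)  # (4, 3, 2): one gradient per input
```

Because the three transformations compose freely, the same pattern scales from this toy function to full training steps, which fits the cluster of neighboring forks (flax, equinox, flash-attention-jax) built on the same stack.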